From 642c81beae4ab6dfc779614748257813346b1feb Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Fri, 9 Nov 2012 18:02:39 -0800 Subject: [PATCH 001/136] Trivial change to test --- .DS_Store | Bin 6148 -> 6148 bytes .../UserInterfaceState.xcuserstate | Bin 97934 -> 98898 bytes main.cpp | 4 ---- 3 files changed, 4 deletions(-) diff --git a/.DS_Store b/.DS_Store index b4098670cc46ae0ea48c1daf569e33b33e845de7..361ab67534efb94b6dcd4539d6dc13010ead094e 100644 GIT binary patch delta 40 wcmZoMXffE(%FN8nsXN(^S&J_>-^C@Rv?Q5<;mqL_mdyv5-B>oWas1;40QQs&lK=n! delta 40 wcmZoMXffE(%FN8P@a|+kW-Y$ld>5CL(voBbhBF6xb2cAjc4OJh#_^9I03*l_XaE2J diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index c10b834c5cbb97c4f2876554fe110eb62b6e79aa..c6b054ad3dc6ce046093b101dc83ddfbf9e020f3 100644 GIT binary patch delta 37570 zcmZ^p2Y6IP_y0fl=H3O8?YE@&Ms~BLKtdoPw9q@Dhhl)J_)-+XuG!I7UmFUJ8Y>by z2!diSfW0CJiXtNR3JUgy{-50ig8uzco@ehl-*e_WGiPSLcXn?$rtki*W3e6gmc3VQ z{i{IRANDeNGVoIlyap)sZy$yTBWyArwmYrD8rQzQc*5dl4Y|pK~-05*{5t# zloYuruD7y!%g#7O4K9dZ7=K>;Me)t?;dl{$UHtX&i{fvHzcF6MuZUk6zbbxp{F?Z+ z@$2F@#&6zoOus^De#G^JJHhR7``xxX*`4mra_6}V-9dLJce%U5-OXL&?&a?59^fAA z9_}9H9_yaqp6s6Hp5Z>-eU^KU`yBTI_XX~Y-OcXH-B-DR#?% zy-(*2413-{OVZ{6Rye{%oo zKIXxrdD?jrJZ_KQV|lVYIi6fko+sZ^;3@QU^b~n2Je8iVo+?i_Pj^qXr^YkTGsrX8 zGsH9AGr=>_Gil49_RUK3o4$R%cYXVPANoG>edhbxcgT0xcg(N&RlmdE&Y$3S`n`Ui z-}2l36o0Be+n?hv@OSce_ILHy`1|<#`Um)j`A7Q4`6v3P`DgfN`RDrQ`_J=V;t%_; z@?Yb>(J%eC`|t8E_pk7;^l$KQ^xx;-nc4GjyYa;WS)^+wdECMn|K_2pOG>a-*}+)u=LR zj2=dBd1HK0^M!^mE;p_)t~0JT78!RKON~}zm9f^?U~Dw*Hy$t^HXbpaFrGBF8&4b0 z8!s5I7_S;Tja|kk#;3+-#^=Tt#+Sxd#@EIn>Sy>|^$AHtWoO<{-1doNP`pr<&8u)67P5x;ev~W6m`f zm=~BAnwOekQ<(oXuQabQ7nw`UyUnHMJ?1jA)m&w+HXk$}G9NY{F(38MF&{IxncK}5 z%ooia=IiDjbHDkXdBA+%JZOGyeqnxT9yY%>f3dI>OSR&y1j}hPn^uOEX9cW~Rcdv% zx>!|K539G;&l+Tnu*O)Ettr-2>vZc3>tgE?>r(47tJw-$!n)kL!n(z})oQVBvu?NU 
zu;aEAFW@k->pCG_I3w5-cGO+?Ib(dPO($%Y&*v;unX;wU1s;S>+F7Z ze|vyE&>mzDwujiG?9uiFd!jwfZn96eXWR4b%j{-5Y+r3(W8Y{?`)+%weUH7&UTv?p z@3rr@AF;RD+wG_Am+e>VSM5Fan`7l&&R)tydBAyV@)V){B(z_J_NUO(gq|++IuZAs zi2H8K^{!D$yQMy=C;OUYJp zlw2iG$yW-LLZxGPRCsi_K0GEoHaspoK0F~jF+6EE9XOpcl%On|ovkXJWcHL?RVkB= zIr*y6MNUgDQ@Y9}$%RUHxp;b>s`QWtlLsojq-f5R%j%QmX(@)%S7zTfP*wWNttpv_ z9Z2A5Wl*@fRT&(fd?NiYnLQ;<^Ws&u>`$5KNK#cbjsgmlNC)l;uR5gz?}fbK>b0s* z^{a+zs+MZ2iE2{#?(ov^J>g~H*6{N1itx(giPcoum{Xu+$Z5%?s+ujABp0Y^UX-d@ zC`&Kytf~RotJfe^4atUGh5G57p_WE?#z%Roox|13w!D!$E-pzi)mtKr)Y~WsPYu5u zW%NpTiu|@PU(Pw>VMARWu3i>tHi4(rRpIJQ-eFJ?@MxYm3~fQz+LT zXBKys+!icW=0wLe?(FEe#+@qx&XevHg>p-AvK+p`t;Ss@9XLl`CaW~`usEl7z+%E2kNS*@eS(7BkuCZihS(D8E>frN+GyEuFX>vTSXc8n;UxEh|^{$dm1e7(G*@{#Phu27BpRAzN9QofKq*QLq()+MNMU&|-f1=P52r6^A^sAgF2` zq_{Un)ts{T!g5vf$R!myieLV5Z_vy@{jvFsoDjy_<<7HtHvQ4A1G9{%jDUYm~!a@ zT~tlTvo9H_YX6m`7n}0+OEP6;4_jvTNK&qq;VnT`TO>Db$y2q(a^9u6swU;;9%ag{ zQe2iScRlEkgC0y!wcDk;wp_VO_O1;n_sHJ0nev8Ox3XM{%L1ylO8OoOsoGlU8<8oO zHCxi#oUCg1${QaVqiXj_(L7ky?w4f`k5#n?Wv^a?l}BXjQ<-w)!x_^1NQ$aGF3TS7 zB2RlHTh*S5dLlXI_0Cha%Nrjmm1p!`U0Eb|J(1K-`zlfp zm9lkfKqfzFsM_J<36f<-f4_1>mi4bwzL#5{9HDAIMK{=y{VjR+HvOgne&tuWbz70l z`d_N5{UKYQ8ZVWBon>%qhN`P_>ysf_Ixtt!W$RNR`SR9)s<)R*uI{Jm39@EeA60kD z@U|LN_sOl>>Qvp7W!Hw}j%zYiJu$-VNPkT>Z#PvvMV4JVRDLqJM0%f2Q}wKq{cDIk z=E|}my_5pkdPA(!r2{qj_0!p^UKAaq@H0W_9ok+gmW|JxuIeRH3>_u&hXv#Z*LPI) z3MuN#<+5ipRlQ0+{cKNFua-*}O;Yt*DTbEI#m{A^dT+V;xq4NvldU(5R`mhWH)6Ol zSe`awqB88160VVP%m3w2+7x845|7wx%J6dy3kAMsy;p1?PWvb<>mP>RQ=$^`%(LQu|IaAgDBbP4fBB#Dwpz3pPx-u4gcD+|QM~eE<$^yCM>P-2; zv!)#JieJ^wmw(COs(z6iaq~n~zf?9p6A5t3p0~7<@84W57mszw^0Cw8!LhomxHdKF z+cq{!xk|R)P%bCEmL0 z^;@EC_qr`b-aX!>+!pOihu>j1Oo!#L9f^)4N3u{>3T2g0Rtsf~P}T}%olw>b~96)=o0Ocqifqi@8}*m=)atLj@rW^ z9XGd8e@-Yn|4IGM2=%)hcRQ9k?r|)0v^thMRybBVRte>KxvVj@!wHgWVj?Lis+zQ7Auz zr^=m$Ir7}>S@NAT3gpqkz?R%I>&n}`DAbok+}&G_8g4~ia%Xk^^Lyw(JPA1CXT+Z# zu38>HGrmcv&j|I|<-8hyX1G$Q&&dOKX3Hls{i*SD!8{a9GkKI+6cSZdF!Zj=SIsVG{ ztHO0c-67Q1SHxc(e@*bi{B(0b8_Mncsl-ph=zN5%d(}HD2{}466PnIyCrSe5LG!(ZVLw0gbSmy 
zn{Y`~xg|8q1ItU5%cXloN99U+Lvg;EaJ8JeqCmMWs@f86ke*OLO;{3@Z3#C^*UBzR zi`*F+swUha7q6^T?v^jE?5Ql1?N)`96;ZX8uv(tEs*__~WIH+`)>4%X@{Ltx$|kwE zBv+1Hoe`C42@lDGB^}j-N984}J1S2^rCP#Pxqo#hv!z^<`ZuAL z@QTbXD^L@5$S2nXm0j|SHQCCZNWIGGYfZVUEK^O`Cy$nusR{4NmbC@S!DtaDd?b&o z%~w8^ua_692@zG+QTbYCb?&Gpd~;l)IhCXGiFHBc2YFBDTs7fm>0Do~{3d&@FIJ94 z^_denqDyDh87E&`pQSkD;q~2=4)S1^pz3r+^_kNn#l5+TKPu0hmRwSitvZwBANOV} zsWM|jo{|w&XU=TdsWL}(=E*xY1eHR0S!GCd2BPxJ8Isu>^OREgXJx+X>@4SO?4(r6 zyEYam-J;^mStGyQSg!PpYBOgaIr6@MqhDmaPDryf)j2S#&74D`+RQmTx_F%t6&BNG z&WI9=XfsP0AC+d#N%G*PQe}$tZVoA@$=c1Gl^Ic8<~%*B%bc_1j?HDtZ28yb&dR)~ zDs#@4+0{j=b754LIWLf+x=eLmEdRK_M7c~BKTxU&Ipu+3x#s~(E`6Xv`Cn9!Ij=n_ z$S&Iw5oC#~bFmC>DORN1xFuh?HLAy)x692vN>%4wrwB5qa*uS^mdS@6%$BFsx>VfHG^LFU~3{|PeZTPN8?1ex=lzX>wu z{!;{*p*r6`saUe=(p2Y%vbL_j>ii_S{W?FFJL`tXRZj$EuO|}Z$bK%>c{nD`k{w4P z^>9L&nacOa^F)N1Pj&tz%lh|KoxjShPmWan_`5Rmsm{MnN;8)#Tc4UB2X8Hx!L6A} z`{T+iM|C9}mu5l59aUy7pRC!|S21OHTMs2sZr#>TiKwrbEOVu|$udjHil{Qxm3y3% zC3g~Sus-&ndipn!r z^>KNYrPRu&pY5gej*2r^-Enc2sSJ>tpBtkLj!HAvFzFjHOm&Tvr;V7Py6XRSthvUW zBF<8kiKl3@6lL-$(#)s2ru{>kc^uOt^Ke3%byAv6QDzy+nb9t{2{T*yj~p?&R&mL2 zeP`L>>Q_Tmf?wmRo51|`SyOQ>tWfj zD>r!Z#B@F8dOTdSY+0M(Dc2|AnoZsU*H+gy*LK&_a^J4HI1zV`%ztBELd5>5>oeEq zt}o;rZ`3u5xUWUr4WyG&@83+*bQT`RO3gm#nAZWY=cLR%`d}p zr_soCw~LupI7P%Q3v>LCcKafAP$iG}Q}30@58iaN)V5cgrn@MHSlfmO$zyv48l^G& z`Zjx)Xyk?i?d4f}6O67g|AsbywcPY(pQPHD{k}GPp9rCH0v z*j}H}81p^a=4+A%_Kq^njM*P=v(J{j->y%Z7qdUvW7-jU#f9a%=bc@?^e0}tuAu&zBJ?ZnD3=F-`(=yzTU>NnEjPD`$~CWZ%>)^fiBN~ z*JZ4U`Cn`E-y385(z|}+zL@j%Hs=G;G0oWTGaichcD4B)lLz;e=QK6W>~zXxJ{g1U zZi8))<@dg;%h&e1jAvtjH`@R&%0v76CA|`}?`^Zc9vP|ef2;KC-^(!GhymYe1HL7n zdaurSCuV=Q&HkR8;_DJwC6SV8FB=ZHk`Bf|@3nzGZY$ai2dt#eVu1JC0MR>*Atz~! 
zJm@{Ocn`<$AGG0*%HHo!P5L2b|ESIWORS_K?++ippKAOb1ANj3_)Gc@PBzq-{j-yH zk3)tJ4mUc)>|eCmUGl?&BMfiM{#BdZln3{f$ps&HOACriW~5phSOx9{J}{a;-qLmw8E1U;pZxMEMp)5%jJ;(ioyKZ&@X zMcgkdJ!PJ9LY^)n?pG1_TQu%>x&Nz+Z~7>^q}J2-{|u}1^dsaMAmWaRxId#|f60*_ z1xto`Mn__Xdq#LhdPWIN5t=HrIH74PJ@uY3ggoPfrVGsxiP73cD7Jfe*x_~`-@H$5 z`KTS-b5xm{>ULQ9TfQskUZ$~yk*NUrj%CgfQw zv^1fmN5L8Lg-?qA9sHo@Awr%?k-#rhNK;fS>g|N67Pn(DHfNDj{#W(CUQNFB;H4xA()t4i~hHdvx-}WnH?K7H5@psi?Yi z;>BfME2@gKN-sUB6TG?Jj{ny(@D_OkguKN<8z{6vk;JMtSRVYMTjo%q4HMdMp^aGS zt@3swN^cyu*byL1+`BxJgmmm0xwu zoGi2{LYpeIX)C=`z0+cKcbd=|Be>`qTAMjTXfuV@B(&34de8QrL&$rs(9RIrnbBOc zqQ#tcDDbZp;=RKAUqarig!Uhy&5j1lk#i4Ko?4&Yo4k^c_ZFee6WZBP%sKMeL#6*( z7~WRzazfsfLOWMz3!>nKGWBq`za`x4y`PYGi_p#&+67U}h4S3PRe$TrHt%*q-e-h% zvCuAwVlI`h94`6yMql^tB;^1~yY{&nblV}0WY z`6dW$h0s<;!K-A~qyI+-XZo6A#l1#oYa`&e-{sq~w*T`v0Zf_6`}`koTn)<~Ev`Kd$tw%9GRhy zbH4k0FHj(~r=#)P!&Bsu@BSu_3aeevk}#wz-PYQkrwv|?YXx2w|w{c z-j2k-5RHF6JY|}<&ke82nt5Zb#!`%q|~3GEA^ zeIvB*+9v2PdF?ML(*H}R_F8OC-jGXvG3Df6LP`gj_G^k<`Aetv?wIQ>x#Sl^9{#0M zdn0CeJ8G!>wNv|~m|O#-v-pp{ zi@%~_c>SQcGn+0L*|^}$<^D?Mooa}os{GyLlHXF}y8El;rr)Z4wf){(_d(x{FBdMf3aT+(_UzwMhU%fDxncVJLqk9azgy0{bSm4j}_YI$IHk+ zAv~?^HQ3+apV9`KDzq22w+YxXZ{%YTE=zCV#aau4snNoYU(GykprVt-OJ|Buo9`%cY&XC%L)E&o#g zvPk-Sg!Xf*zg1|zOc_+yZ+PRJruqf*XU#ohVxvqw=1*PiUl*=f?qB0yE41H)_WN@G zdjGvb`$K5QEuq`1{l6)u{}2DM$ZuwZo*14g^dzCDM1J=(&6^V;-?Bheu5NbF!Dwf+j}+m8 zS#z3ldp|t%($70?dQRxc(NZ+xjf7ayH7z&dnK#XAPHZ}NLDT%1jTdG$F39TKy`my( zD)jC`uN8XlNZ#lPWfVj=G(9tnocd0Q?D~$c zgAt6`vRiz*620Cq;$xRGdTto`*PecEqap^$4|B@3L{fbBn7J^FJh*pw2cu`~xhRrp zXv-E|@q2tZ>zGW3tg-=rPz?#(#ufCG>7lK+Am&rCrK7 z5qEVo%K~Ge&})R=aKbK?6?V_D?xqOY+c<{5bM9oty*x68EzV|@%UIE=ji&7N*! zQ|x(YWWG*(XK0#jZ9N$C4-X^1{eH0#tBuEE#*vYrs05JDe4m(LY>k;mM{}3R$M$*? 
zj^Co_W5UR7@AXbFUW#GH9mizs_qvT8G24Vlx}MQ{hx-pEHb=fOwT<1z{&3A&V~_Es z@s_dIc-wf#*k`;e^hrW*5c*`HPZ9c5p-&U~X+m#YOUO82yiW*^@h*AB#}Ubo3o?yTvvo2H&{w3K#GZf|jQ zP*UQid23sarYXrSA9qj+HT&X=OIwQKmDHkd!_!(LT-pu{<9p+e2;CoyAB~@kpN(IP zUya|4-&=~EO7d9a7IT1EctT zp62xqs_R#O=DhQVH=Z}^jK&4?=0`U0eluq+m^Z)o{HDff-cXF#U-8PUmV4us)4${a0pA@s|IeudEgEA%UcewEPwx5gY}jy1=by8D`6)xW*Kf`=o6OVcV4h)~Y0fgwGXK*OPEp1>rO>Yv z`n5v8B|N3&!xW`kFZ1m1v=!z#=6v&9p1PHyzA{=6UA$NrPrK&0R3-^jS^w zg?@w37oD8C=+R(aWM14dDpkqS%uART`IOmwBDOgiuy|bkyoK{;G@YWZx(S|W67^IubbvI>c)+!n>S4RF=6&IsRpvT#y?L*> z!Q3eHr9!_)=*xuOD)i;6%uVKI^M1uNw+MZO&{qn5b>yI4+tMvt8RavdFrSLl-IGFJ z)vB1`sV#SAD+RsGXUyj!=4XYzCh}W`Dbu{ZzVqguJ8yPV|5>vaG|eA2Z{{gQ{j&LL z1pSK8*R`6j34OgBm6_Vb+!d}~ZoXme7W%zH-`qAlZ<+6etCpL4&9{ZVLFgNooBPan zg?^vVH${E`(o&P7WTlxOnxFgw^{M%p(C-)e11%TiC>hP>*XE(WeZypaYaVUm@tx2g zI)2ueKSr4RWd1DlhlT!F8%`;nb#wgDDvx@fJ!Qd+`nMJHH^R(RhDZKeX;w{`NS|I5 z`U@wh^bAjHwY)-qD!R(7K33nBZ_ul>Bp}!&Y-9q0Z z^f!h6meBVK{cWMYv(`G-T3{`-&a=+9F0d}NE)x1ap?@s&BSJqa9I9|Qgu^Esfob0K z2{Y!+Z0aiKe}=5 z8BKlXEu6bR=!at^wB34o((!lS`lbcTt!J2b%Jc5y!Osc(+Zg@@>&0naU+k@S`s}9Y z?LmFhf)&H6eL~g;QN{JU(Ekv6^y?G-FX13!bSN#oe9F7c z)*#L)H&5d~9ZZJ~1=aA?vr34(pJ0 zEb?%o&}|!A3)if$61-%g>WS8! zcDkJzu4=V2gu~ftX9G7lXi8nf%;=s&Hhsxf<;m5!ZO3FKV@C3P;w+6F#SXrhQghoBt7xoD(kF zo@<{SX|_4m?m5Dd+vZzfUl^IAnP#(nzI}mkWC?w-aO4R`{z`jh^zWq+vH?baLsairv2Y2zA*AJ!csW0#?6}3IQ5J+u+zTIUew0- z2H_}b%d^D3>F@J&i+y{f=x()J?AwGRC>+JY5n4gWzSEB0bUQi;M`_DZTe&pTUT&{! 
zoBvh9QPP%ot-UT%C3&OmjrOLK$AP_BILeQ2Q1+H^^$Pnz`yu;b;pi+JmBP{OM7+~} z%zpCko4oy$y;V562uDRrT#}NXWXMYKG<%1=>mP_W?A^lA zT{x;+!bwV6V)XWHNYjN)GkedUaptV^n&!V@ziq$MGBsBzY<>cqPlNMyaJ~i31K|7u zoL_k;r9B|D8*E!%i z7hDU$bw0Rm0M}x0-2~09o59rrt~cY=EtxDSB)Ah^E(_gCOP1nzIZeFWUUg8O%H{|O$2W;_~rYQb{` zc&-4?M({iio+rVx13Wvy^9FeKfafjnybYdx;MotJ1K{}vJV(Iu9e92K&rjg_6+C}{ z=P&T8;Prqv6}-9N%?EEGc#FVW-VELv@YaI&2Jp6kcR6_P1@BYfeGa@kz`GZ`2f_O( zc)tSg5%3-Z?_c0kz!wKTC-~gpYXsk1@SP35`QTdszDvQ^489k^_a^xEg72M(?OpJF z48Bjn-x>V9z~2Y_b>Qz0{t@6G1^&ChzYhHOf`23UH-Z0A@IMYldoV0862V9YBNdE1 zFbcppAB_KkaTOR>gK;ew5{z5Gcn6w|Pr>*cjPJns0gRu(_yvsL!1x2qc3^e@GXYE& zm>w{FVD<)cHkfyU`3ji7fn|f03syc@9l;8K6#}aStiE6k1#381^9? ztp9*D2dsHuEdc90ur2`WDzI(`>khD1fVBav$H3YO)~jIc0Bbi`Z-TWKto>je0P7%F zUx0NKY`|^@wg>DauseZW0rse7uqS~%8SH6bH-bGM>`TDD3hbM}z8UOW!M+XbJHY-L z>>nUefy6jSjE6)gB)TEd3yF!4m<)-jkeCjM`H)x)iK8HKA|x(^#LFNt42jKGK;o5< zcoQVv42i9fxB?PaLE;)nTnCBwLgGe9d>j&=gv70oxE&IofyCz^@dZdc1WD~7sRWY7 zLefG=x&x9{K+-BmdK{9Tgru#Iv>lS3fu!f4Iq3yRdI^$Vfuz?U>2*kY8IQaeDZA5zO9 zwHu^Pgw%PEdI_Xn2C2=E8iv$Gka{De-Ug|6K{TR|dg|zP>-2v$yNN-Mq^bAPPh4g$#FNE}BNbdycrI6kY()&Pq9i$I{ z^g)n53DPG+`cz0i4brDW`a_Vu71CdY^c|4C8`9r|^u3V2AJPv%`awwl9MZpqj1G`t zLvuzFWF$jI3S<;QMiFFmfs9JXsDg~{kWm8}wU99gGKN6LFvu7I8KWSh9x|?gjQb$t z0AxBLvj#FJKxPwUo&lL>LFR18oClfbK<1T@c>`oFrkTuJAhQKBZ->m)khvBzH$vtn z$h;piABN0FA@gy_d>>-dn46;W+_9)1%hwRfJyLmcf&xGvL zA^S|oJ`1vEL-tLOeII1M1=+ttPAcT|f}BB+GX!!bL(WvlISq2AL(WXdIURD&gq*V= zXEx+4gq-ssM?%hBkh2tWo`;;*A!irl9ERqcpCIQK$Tc804RSLecOc}Bg4}w@m5_TE z%nCa(hWxvpe2>DAO|5nJq4f5}R{Cgn374la={{4{uTr=eFfc$qM|2@e65b{5U z{7)hOE66_t`QJeP&ro1MK^hcfKtVPX32g>EQJgu-MfOoPG;r{$P}m;|>!ENg6pn|&iBQ-8g;St#8WjE)3YS6Qi%@tNIvUWiGjyzh=8m<{ zu_tsK2ptDQ(Hl^-2a4W;qPL-F9~A9}q61KL5Q;v8qK~2IQz-fzioS%Ruc7EL6nzUt zN1^C@DEbkKeuko7q3CxgItE35K|p~(90YU-w4<3o2M8oUzy$#h1bh%MAYeft5dz5& zNQFQ;1TrCz4S`$;W@fxuh{oCARc5I7$K7eU}s2!tVU1q7~w zz%>xK9s)N);3f#%0)g8ga3=(oLZB4_DB?!C?H_@N5X43&Hascp(HY zfnYNPFNffj5WE_K*Fo?G2rhx(%@AyX;2jXW8-mLqxB`N!A-E2L8z8s|f)7COAqYMS 
z!6zWN6@pJg@Hq&+2*Fn(jqHHnE(q>{;9dysgW!7*JP5&$Aow)|4@0pBij$x?1&Yg| zxEhLkK=F7eJ`IYeL-Ey6yab9R6yFcUk3;d3Q2Y)Qe+b1NL-Ah_Y7e1!2sM{Os2V~& zAT$+1XFzBcg#HJi#Spp)LQg^H1qi(aq3oz90&Vd!)@bb16j zZHG?JK&MZk(>KuR2$Up2Nj8+^LdgIq841lLqoG7V$+b{&J(O&Lk|&{LE0nwgB_Be` z$55(5X#$kGptKlDE1Ffq5M@Se-q00LitfB{|(CjXok)y&^Z@6=R@bd z(0M3y9uA%7K!rx%7svQ z8B~U$xv~{1*F)t7sN4mW`=D|^RQ>^7b?Dj-x@JSy0CX*et^=X#DCk-bUC)QEVd#1} zbX^Kv*Fe{G(DfDQx(B+x1znFpl>@5ULsbx}xkGX*5&S2vs*i)ooC92UKl^ zsu!W^WvKcRs=kA&AE28Lx}`w3H0U-Ax{ZTw6QJ8=(CvTF?HcIzICOg!x;+owzJ_j* zxSybVGIY;@?s?FC7<3;8-6ufz%bTJ5bVf{bT6G*J??bH)wQi{OLTwq;c8A&;sGSM5bD(w})JE=qYVU;FyPV0uR`r1 zsQnRYe}Jb0PHH4n1Fnp07eLC-kzQS0eP90lj8Jues3cKIrub z^m+_>w}akZ=q00WEu>)1%@nzAY=ua^8FBqo4ut6|vGz=R9!|sA%t6kMjU_< zpTUSPU}O=DEQgU@VC4BQG7KXxhmpHs3Rv5b-#>K%n zCyaB$xbZOVG#ED>#;t;J_rbW$Fz$O8_Xmvo6UO&|@%>@^Kp6i&7{3_C-vr}dhw*R2 z_>9m~;h9x(X&e43l=lq=PW&LzwglO!^EaeF>8e!K5Ey($6sI518~9G^o(vfd(Hm z*wBy!4Jpu&2@ToMkOvJRXy^_NHPFx-8u~&*zh-C{3=Kn}VFWZxfQALoa49rIe3wJR zmC$f2G~5micS6HbXlR9o!_e>(Opb%eI!t!LWDiXC!DJIAr^4h6n4Arh^I&o@OdbW3 z8({MJFgXm9n=gmSSHk40Ve&07xdkS#gvqO6@>-a@{{K;M-_c!ANgIHlk&+-tCj^il zT;0!A*S5MU_I0gmuV4rWNJ8)Z2TBn{QA9zCh2A@g1r)m|NJ5b!J#-Qhl%BwM_;1cF zbKg00&b@Qyow+obLz8(lSwNHJH2In)-_qoJn*2zUpK0<7P1e(7BTWv|B%daQH2I4r zCuwqqCdD*G(-Jf-P17=0vm8RQhdWsi=7fbNsoxJz}FFwSJZ}8%Kyf~H@f9J)Wyttbei%Bm{da{J{GNh-F zo=SQR(rc3bH`1>l{d&@GB>g7R|4sU>q~A{Z1EfDp`lF;jLHg6AKS%m7(x;NXi1a0- ze@gmiq<=~JSET<$`YO_YBRz-o4GGeBlb%cZLDCPCo=5sI(*Gj;Bo_(;nq5M(3N))q zvlN=8(yTVk>e8$M&Hj&O_tET8nmtalr)c&p&DzkcEzR1~tP{<;((GHB{YtaVG)ruy z*>0NU(rh2i4$`cEW`EM`1kFy9QHP8)GVUPbeli{;;}J3*BjZIfnvv0tj1FXUBBKi# z-N@)cMlUjkkTHym5oEkc##>~JB4ad(jEQ7?NX8N}J|*LGGM1C^H5uQM@dFt@lkqDV zzmbtc##S=6k#U@ib2LZunlx`n^F}nkljaZ5{2`imp?P1L_ow-InoptmG@7rZ`6>y` zf2GC6w5URhYP5Ko7U{Iepv6bDm`RJ-wD^G*ztJL_7CE%oL5p3q*iDN)v?!p(Fq1_x&day* z^1Zx#A20uxmmlNhCnU7lM4PR&*-o2Xw8^E-e%c(OO&)CuX!9p+PSEBwZHj1fp3IBL zEJbEAnPtf=Pi7@DtCCrr%$j7@CbJ%y4avNW%qz&elFX~gyq3%x5@g;)<}GA4Ci6}* z?;-O(G9MuGVKN^j^9eGaCi6Kmn~<4KW^*!IlKC>3S!A{+vlE$J$?QR9Z!-IlIe^Sp 
z$s9uFFfvDwIg-p#WG3Dvb1a$T$^3xKkI9@u=5#V=kvW&l1!OKJ^Aj>ZBlAl#zasM+ zGQTJDCo+E_a}Altkbw61Tk@Y{a9+!~y6j{%b^#WNhl9fT$OJubsE0e5tWOXE~3t8RC>P1#xvJzyy zLe^liUL)&uvfd=?ZL&s_HHNHlWKAUNBeEuwHI1y9WX&OKK3Nyy|E#5CEhFm-vR06_ zlC1B@`jMPtTSX4(-v(@ z(6%&f%h0wQZ7a|=QJJ>YXq!sgTC`20?Paw6FKr*A?Gv=kqHR~&cBk!Iw0)np<7vB+ zwyS9SD{T+a_D|X#r(Gr5)ui2}w7ZFRchc@|+GWwMEA6_|ZVc@{BGGOV?Y^en&$Ro6 zb{79&k+pVKXyZd>T~2i;E4?L6Is?pM(LTDo6P_lN2J zG~J)2doQ}bLia&*pF#J9bYD#O^>p7x_Z@UUO^=Ht^e9P>di1!09{)#=|Iy=FdOS~$ z?(|5|V<0^y(PK6}=F(#WJ+{+hCp}Bhvm8Ck)AK5N{)?VB)3Y@_JJPc=J>R3}hxGiI zo-66Oik`pH^I(FW$LRSNy&BQ$DtcW*uczsiPOl7ljiT3hdQGI)xAgjzUTf%8NUt;W zDx!BIdS6BFYv}zvy<5<`CA|mGdlHV8yR>z1Mg(uNCu8& z;5Y{EWnex73wY%+Ub&iA{>dvHd8Jo^SNibEa$fm?SAOEv61-ZDSIhJ2`Z-(?`$Ttl6g(0gMTArcR8Crv(PcpPA zL(>^Lm!Y39bQwd>@mdnEmEpDj@!GSz_B^l6;I)M$UR%s-M|tfOubpAobqu?eVT~Eq ziDA7N)|X+QGVE)HeZ%ll3@^{{iVXiR!yjY#6AXWk;U6;mV}|cy_#uWL;q@zd{RUqD z7q7SD_3pghlh;>DczqSG|H_D}jHu0sx{T<{h{22)%7`_L_?;1(d80mW{GB(h`+wx|6-dx6;D|z!>)HX)#W7GlOxtVwF;+=bVr!Vge=AEIu z^BeDM;+-vwPGNLiM%QO_D@J!&JUTc<(jd`-S(`^WH|rRAWpX#?)g>I%8gDOeSNdF=jqv7BZ%g zF=rT4#Mqk{dnaS>X6!4BeVwsyF!ndbZer{f-cRHGe@J-$YToa|`-6CY2=D*M`)heW zhjHZ@SDkS+824|+-N(558TSC=9%S6JjC-DOEgAPR31e=W>O)O&M>Kn$tg^(d*RKg&*b}<{3w$jXL3g- z_hND%CXZqAM@*i?itZ8kg4sN+JmXRnEEkOXEAjSQ`a(eGgG%R?P8`?VOlk& z-O99knRXx3GMJXhw6;upooS<)_8!xgGHnIZzGm8XrtN3iL8ez_dTplHO)&j#ra#2= zN0{D;>AjiWm+4cPK9A`On7*3n8=1a|>BpIVjv1IypBaB=#+A&tn;8!=;}K?LGNTJK zx-nxSGo~?P1~b+&V>2_hGUFsO6PS4sGp}IgwamPpng3(vv&?*+nLU{~keRPC^J8Yt zV&)uX{=&@l%-qP#znEFf%=65u$E+)u^?%H|msyW6>rrO)WY$1ty~?bq%$mon1teyz zW!7e9ZDn>TW|wDnMP^^d?5mmmPi8k`b{l4AG5ZZ>zsKyc%>JC&-!l7qW^ZTqer6wJ zPG#oQVoq)5+`yd1%(;U(&oHMMbDA@!FLMS`ZIiE9U9drI*&UWUUXKpfc zFJ|sl%>5U0Z)WcE%x%Hkmds5s_ci7YXYMrS&S&mI=5Ao_cINJ6ZV~fJF)xXEmox95 z%)5?x7qZ>+USeJ=<_%-sDCUi3-V)|5XWm!L%VXXN=AB~xrOa={{J$~(KIT8l{KuK! 
zf%!d|-<$cfnZJbjpD_Oq=I>?xeio#%pgs#4vfzFeJkEl|lPu`Lf&naeg$1)&u!IGl zuwW+(4zl1d3rn-G0t+j#@Fo`C$-=u?*p`LeSlEMwlUO*Lg>zZBnuQx#xQRs{r>d2z^So9%_K4#IkEc%s2Ygl{FJ|Sv-Qp z@3MFdi$7!WH!S{+#rs%%l*NTCDaVp(EUC_tf3oD?EV-2>Em+c)CG90F8Of5dEE&g= zZ&>mROIEX_fF-9{a+alaSo$}XUcu4_So#D@pJHh*mcGK$K`i}=rP(Z9$0v38NtT<8@2 z`Xau*jj#X1*Z<|~*Cl*Cny=sE>ot7+J6~^R<)y4_#LB<1vNbC^va&NP7qRjSRxW4d zpR6omdYw>dnes0Up?fLmDe*TG{SF!3MR+VMd zC9HaiRWGut8HrUBST&VZ(^++dRmWL%l3(uQmq+>KaekT4FU$DlbAGMDub1*`9e&N= z*GztG%dboMbp^kE&FT`YF30NftbUBu&$GG-tH-l?3ah8FdM~T>g3cUr#3nD$Z1HM}wKhF9m zS^o^{pJRP<*1yF139O&V`q`|X$NB{c)_=zOFIZp5`g3f+h7xQj#fI{1sK|x~*zgP+ zo?}B3HoVA&)@*3QhPiC`oDE;H;VU+*WWy>p{L03PY^=@3x@>H~#xyqmgN;|Su^SsN zFyR{qv2iFHhp}-K8%MKo6&rtN<7PJg!N%=u+|R~?{C*X`-@@;=@%tV8eiy$#$nOvH z`)GdunBOP!`!s%^!S4(CeKEiP$tG;Nh)t!~ltf}vMK)Dt)BS9EnoZBL=>;}5Wm79Q zz09T$*))eu^VqbIO^ey|C7V{TIf>0x*_^`W8f>n`<}@~6#^yF`?#||(Z0^J6erz7X z=GWN#E1Ne<*u0g^+u6L6%?H_hm@W0#@(;FL&6aD~ay?saW6SMq8N`;i+42rs-eb#H zwtUQ%$!yuemi=rw$d)5)$!E(+wwz|`)oi_$t&Q1wCtL4k>q7~)KEl>l+4>e+N3r!? 
zwvJ)zM{J$M*3E3)$JPUEJCQgJNV;n{&v#mY#Yk9m26wX zwzX_q$F>b@+s?L~Y`==_x3K*-w%@__yV(99+aG58hisq2_IYey$o9o-|B}S^6>LAp zj}t%e2if%yyPjrO3wE_+S9^AKVpmso^53A?zB)t`Y1S$*x)K z%1*Fr2fLHlU6I{2*}kiIj_m2hp1$l!u;&%_3}(+q?D>&B8`x7!ZfSBWkXxPHG;%K^ z_iA#lCHIC4`R1F*y@lL+$i0u;2grSx+(*fMj@;(tb|AMCxxLBlNA3V}UnO@4xg*IP zMee)gjwN?Ixl_oUOYSG+eoOB6Dy z**l26L)rTld*5O2dkOZw&)x~_oyy*M?ERFz-?8@x_Ws7+9QJNt?^z~0mByNG>d*_X<`n(S-HzRTEm1^cdK-_`89iG8=QuQB`XWMASQ_C3tLr`eaz zzP9XZ&%Pe)>&?D?>>I$oSJ^j$eIwa7ihb|0Z!G&3vu_Lg4zRx>`)jcOGWK7?{@d7p zJNy63{s-Cr2>Tyn|C8)*%Ki)S-~JZtZ^iyL>>tGb+3f#}{kzzIi2WxxfCHCspaKU{ zIZ%rObvRI;18E%i2M4ab11=~S2#47L$7h@bq-DA&}t5CGmLugkQk^3;IZ~S=^*GXy#F54vX~~f;92w7%DIA&0kxw}C zEl0lR$Qq7hb7Vb7e&@&*j_l#cK8_sZ$Ptbl<;W@Wg1oZirIJ^ZyoTgmM&1?VT}j^6 z7^=?&S9*e<1mT$RA4naPr?E|2^{GCw~I@ zACfi4p8~K~a-%9>=@^_J+OMV{t1?2xp{t5C=bMzvPR^Vt&j{c3K zS8()Nj^4o0n>cz4M;mkWK8`-X(T6$uC`X^*XcLaM;b_+jJmWx)zRJ-N939EgQ5=1j zqhmSxF-NCxbUH_8ada+6KjG*%9Q~D}n>o6bqq!X2&(T91&EseRM=yXZ;^=t_E~20m z1?4HIML{D9uBG652?e)Na0dl^=Q!4cV=Xz>o@0GD){kRDI5v!9 zBRDpaW1~1Wo?{|>5i;n;MJE#cTF9Q%x8UvTU~GySn2`15i8Oy|!G{(Ol)Tk&TH z{_Mn`$NB3b{wm2|N&HoYzbf-rRsMR6zh308X8hHHzgqHFd;aRkUqu`*!|}2lFVFEx z9Iwjp>Kwn06GJ&MoD*+wVkRdRabgK4KIO#cocM(kYdDe3iS?Y=$jRpuoNU3#j-2ew z$!?tN$;m#P?9a)eoE*l<5u6;!$&WZWnUm8vIg^vKIeCDSg`7OcDV!?7snVP(!>Mwd zx|~!0LjO5bGkIA%W%3Jrz>!}GN-F? zI+fFnIDI*%|BusGar&Q}zMj+n;`BY7{tu_`=k$Y|9>D20I6acnqc}a9(?4^1BZOfIv ziiRdA8b;A@ir%2;EsEZu=p%|IQ#6&LMHDTe=u?V5r)W7vUsLomMZZw=D@B_r+Dg%O zigrSLA#Z&Zls`2In8*d~41R;QTPozs~v5oFBvaah#vX`Hwh1 zm-F*E|4D-LpK<<6&VR-EA2|Or=YQq=Z=63RTqZm&ydX3cS_mD5&O$GtuM3|7g;#`O z!U$ocFiIFNd?3samI|K=D}PWF=AZ7E$sZ zQSxDtDEX8q`JyP9AxdV7lAT1!?xN%yqU1vBT5yEQs+hK+ePVzMCnIF z>BmIrCq(J7qV!Zzdb%h*OO&1?N*@!Y&x@oGNu@wMbZl*DMKV>iligs%ZTK1BDu0it|gMwMDjmH@^vElCXsxLNNy~W?-t4b5y|(9#wOi;c(u5A zt++Txl&v7jUMkAg5oPO(vJFMq=S107qU_6}Y?dh7E+NWJ6=fHRvP(qSPes|!MA@UF zY>_B?PLvB#u7oIeuPFDpDEFi&*HV;wS(M8X<=TsKokY2=qFfJAuD2-HPn3I8lzUs0 z8!gI>5#`2-auY@%9j!4%Zc(8MET01d^J%%Rg|wK%GVL)uNLL673HrN<^Lth z|67#5O_aYwl)p&Q7J^Fi$$e! 
zqEZD>sj{e4S5&GmDqSNgT_-BtC@S47D%~n7-7YHKB`V!3Dm^JGJtHbTFDf+^m70l4 zEkvbOqEZ`Csk^B3wx~2wR9Yx1trnFIi%KU&7E$>DQMsw8+)GsMdtpLU zennIsEGiEbl|K`ezY&$c6P14vmDh;M*`o4#QF)uFd_Yt_EGp-V$|pq?qDoy+B~4Vh zTU2>SRQaE%^0=t-oT$=VRB0&^RbCcVI*Td^QDva0GDuVzDyqCCs!S7AW{WB-MU_>e z%5hQUjHud3RJ~SIyf~fkUsM=ao?JBDF z7geW=stZKbMG{eUsi?Y4RQ*j<{X_MYT$zT0>FoAEMd~qS_sz+H;~>D^cxbQ7ubUYcHyG64knhYHx{Z6GgQTMYTx@ zQEjTIHbYdKC916u)qWAxR*PzDMYVOJ+D1`rv#6FQs+|zkE&wSODMU&Mky25lR2C`K zMM_PPQd^`n5GjpB%H<;EMv-#ANO?x2G!rT9L`nyd(p97+dWe+X7X*>=ibxqOQr;FR z<3!4Ikup=H%n~WHManXf^0`P^E>gZ0Dc_2eA4JM;A|+d-Y!oS*Mamx{<)BD8EK&+Y z%AX?Tgh)9nQqGC$A*z=Z)vJm`_4=ZELs9*5QT_i!^{Yhn>qPY%MfICS^#?`uCq?y^ zqWS<)eVnL1UsPWtsxJ}MKNZzK7uDB^>L*0%MIyC~NUbJP>xk6)A~j8<{!OG_AyONQ z)H_A$JtFl!iAa4wq&_54pAxChh}0KFYKBOCNu;(GsU1aX7m?asq$Wh_DTHoZSEMcwsf$JGuOfAgNL?#Z*NN1PB6WvI-6c|UMe2T$T96PmYKj_l zMUDGKjmJfeo}xxSQDd^Gu|U*VBx)=bHI|7QUx*qjM2#Osjh{u0HKIngsIgwu*d=Q0 z5j75o8iz%Vd{N_1QR9TDaaz?4I^#v1`J&DzqRuK&XM?D7O4KbS>L!W0HAUUpqHaA=x1p$e znW%e(sC%WTd$p*0t*Cp0sQX`0_d!wj5mEOsQTItv_Zd<5c~Q5isM}1`O|%eo`-{47 zh`Jw(x=Tgf??l}{MBM^Wubik?U(~xv)VovEyGPW!SJZn%)O$hHYcA@&Eb4U<^}33B z-9)|qqTT>eZ=k3*T-19*)O%CZ8zbtC6ZOW6dNV}5St3zyp{Tb+)caJ_`&`spDe8SE z>ir<<{VeL`ih5^7{aT{_<)Z%IMg8kU{ToI7n??OwMg7}F{r`&k4~qJai29F-`cI1b ztwjAdMg57Q{t{7trKta{sQ-hg|Ffw7t3=fQP1N5d>gS941)_eTsDDf}C?guAhz6;m zK`qgsj%ZL{G)NN-{wW$^BN{v*8ayo;JSQ475e?EsgXW?^OVQwE z(V&ND&|5U`=BN`kO4UM8nRaVL~(< zC>jnD4Tp+`!$re4M8gk7!#SeiJkfBWXt+c){8TjjTr~VnG|Uza*NKK3MZ?Xa;UA*m zjtlyZh=%#1VWDXFmuPrWG(003o)u}yBJE<4c8N%Y0E^~7b0ziNLwk=z7uKLB5j>WTQAZ!h_u}zEmx!+ z7HRn+tx%->CDJY!bWSu1(WsRTM(>D5ABjej zM5D=~(G=0>3(@E&(dhq6IAfFq05FWcCkla!Ab5lSgVrt}E<)fE1f9Uu3q*?$v;=~8 zaP4Ur-Qfm5^vg3tvd)VI9#ZQu62W392aSf5y5Sl?JbSie|%tOM2`))51yg}K7C zF&#`7bDG^;V{R~g%m6dQ++s$UJIp=i0W-!-Fprog%oOvCdBMD5W|%o>$0=%PPW+?jb9$WB7SAOjBk!#AHN~~uK11d zcgNoozbWCYt-tFRDNPT#9(6t8_P9;A-`(Dw=FW8IxI4Ln?m~BoyQ{mqyQjO4yT5yo zdzgEqdyIRWdy>1}J;goEJ=1-i~A1u zYWF(#UG7cp&F*dP2i=dlA9p|He%Aeh`(^iQ?$_OKx!-kv=sqyi{h9kq_c!iu-9Na0 
za{uQ3)1!DaPg_rd$Kx?PeowL|*OTYz=;`Fi_XIpaPk|@o>F(*_sqj>KdU|?!dVBhK zhI>YMMtVkV9nrQ)Y1-p^)Az3LJ>LP}XTGm}2YtW${x&c)LpRzP@rK**7^YzvNk+1f zZe$oaMuAamlo>sYK1P)>z!+wXG{zbejLF7SW2Q0NIMY~YoNp{OE;cSRmKiIIX5(gK zmC<6XHZ~e}8}}HSjC+lTjE9X!j7N>9jF*j91{<#$yN%b3J;q++ed7b;L*pakE8}b9 zpmEsv%lO;GR7};3GuxQ)W`gN7y=In~XLdA$W`S8`7Mo>eSF?v%VfL2I@%c?*Q^V!mqbHeWOMm~Wb2nO~a+ z&2P*@=3(<&^N9JK`Mvpz`KP5@4$EzMtZXaC%C+*Wj#eit-wIektEbh=>TUJ0s;s_N zKdZkr$Qo=7vnDrLr&tZv6lc1ZZ>&Ss zVe4D#i1nTIz4e22)cVc(-BxVXZfm!*J+{|w^4W=Ynw@V4?4Vt0cd>iez3jpE5PPUS z%pPrzv&Y-@_Nn&i_8InUd!fC^KHFYuH`!tPI{SM22D{n5(Z0#P*}lcT)n04gYd>T^ zY(HT?Z9iwfXuo2=YVWn*wBNBmus^mxx4*Uz+ehsm?ceP`{0@H`e_MY$ziWm+-Jjvl z^k@0A{W<hDvA%B>elj2qO(vAe4RMNv0ElNgsOr$-7nM$_&Yieq5FUS2I!xbBq zPD;KKP=ZQ<5>h%Vg-Vf993B@QAD$4N7@ica4cCS1!;{0O9FNgOj-H*VD&6E?Q?pg2 zhunO7mL7@SQ;Nw&s?tYR-T(Ro-wHdD1pjxV} z`qe}=NljMUt102z!*_&Ng!WN~zp`O4`=%vd70ItJkU5M_5L9hF=iM)lr%?;cEHa%ue!f z;#QZsI$Uv|_bGLax>mh&>xfQE6*c_g);;-dwcRlmaWmz?V1aUabRy&CL?<%tOgW{X zNLe6P6m(L~mIn$FHS zp}Z!mOY)T01y1ca!;4;3NoRr zK#7yyML|__$exR`RIQzyx~N3eoN`B5mg157%Q6*1PPr#r)ol6qqGDA`lC@p4l@z(O zYf$MRH+Jo;WXdgP2UIObesFfSs&$lsZh1;Tj_%f33CVD`Jf%oBofA;CQdxX&v8r{I zAD&a7YTc#t+KUEr1b@9&~& zOXczl2CJHoQ?{0>+QrhdBq+;z`epT!L{+;|uIQPoER)wR$yT-H^8KF0ij*x&ljYw_ zd~LKF!pRNZTzS^E_Ht4$Q@Kgb=@pW*du7N+dbyQ5WXsZks;!o;mQjucg9TNhrO!y z@(CV&E=pFl-SWW0qg8FMR8EP3;`>|5n{sq^y39DmBS-X4b-W!(vWF~wtdqcTh$K9$piZTPAlkbX*nth}O9)!i$1_E&YE9DQX-9=al3 z)h(HENxnQXRFf4~TB@EXM_)Ny)!UyS^7~M~+%>eb>^&?^$&iOdBx$$2a+s-P%O|gi zwO$&Qr{u{452wp{PnD>8zN~&Jav7A@4o_1G;%Xz)n$zGW^|FPD!mt5x+1x$2o}RqwTOQoh_dHbJhsCSBF56)E9k*|=r;IENhdaQ~FE8@tG&3AS>rth%wgY?|OzRz^uoxv{-`VS-D!KAK^q<(=qLZan5om6InX zD7QopVEuOK+dWLxTUJgQtE`pYsz?TwoN`l^s;`&#Od6(al&{^?w`~GX>YKt9tG0Hn zU91jNypFc9BhcXtR|{oL?5q~b+GA(3!_#^`JB)C}Mu+LJ9Ja&nNOUAQk{#_GDUMWm zxIV=xlzWezijMT?dFaTJe@)F%a*v-Oj!sfcE>?okt<%vtx&u0jrEuXx*Zie%6xNM0m^A0inD$D@D1f22H;%9`*SI19}ZxrfNLVbF5{LJ{%g!+t7pOt^y zl37@t-KVh+kw2|{=EcwdpFRtteV&s;Zw-~67j9^YKVPUXzQNbD;~?L}U%=t`rSVNd 
zeK|ZusILh1rDM|)e_^EL`^eGn708!v?O1SW{FUL#HSw3lUmkyjP+t}5ZlS)mCjP4U ztK*jmb&pW@3ib8z1E$YfIB&juC1bEWpaku@CV2i&mBG{gc-yFXs z{{F4+-+qCz^^{e^RAqrY7!0ZjXGa$`;k>9!OSnKzDJWDEn&gUtj%vb%$Ap?kp1USV z{!-vp6E2S`wS=o=MJS*qToaXQ2`l8NwOy3!q8crsIjYeTZkFNBz0`!;R}=aPXKwb1H}B9i;&^ z;XC_k9N=j6zIXlSOu34%xQ!ecqP@Oq) zW7m-C>=+em&OlVGIYTngEmw6G$J)PM1(w`@f0=S|RH->HlT)^KQLc;%HMun=)SS!Zik>;DQ%04V^9K2T z&mz@%(+R2Oyft$F9hYi`>byhF=~bXQSIarQ(&Zz)T&nZVs9tk!i0U=x-6wQXM6WsT zm3tl-r)-JJHRraImu=3!kIA;6q8?XmJsyv!whpSRtqc#Ipt=&Gy3OU58AJN2E?-o*xvZ5t z2PhGB7SnC6_W#gruC$15Q(V$FtaDU?{yx-}*-sYA-osK=SKdjw%}`zWCkeM))m0Fc zC-a^vRSHiMZmFuPt@J-h zv&kdRIOMa}j8t7iWzSI~RM!Z3^{9!etNMQqHdoC*ben6u3|CK5U6W4IQ2H_9=2Knu z$CO*D>S~ZnM>nXhQ(LuL2c_{J(#>^RWXW5lo9m1d$s8J!uDa$*cTKJ8I_o6y=22Y> zPa3&{>N@8n`DQ8SN7b8a$uad-q`Jbg`lXqUiy|W(S8s)?E2hx)pOA0ul`D>^w+z*F z^$EQek#DYRPc-|ylB}#errs<$@s+Of+V;9^n&44g$3}-I!L?TUb`Mw9ubfn)x;Fkp!?`xednOH4U7O`=Hw{o-TO)5Y+8@(BuI=H97T1F! z?)GEnwCfSM`SuRkCk|NG4%g%1%2lgc4Ntnh3|HRg&65XT>)+P(sp~V>=dLeg$)0%* z5qB_Rzh_VXCU=6{>2|r@BJPlg`$@$8B{WrN4xzb)<`bGNv}B>B2`y7-xkAeqTCvc& z3$03M1B5n2XrqNTNoZ4qcAC&;3++syEfm_hLc2g{VWC|tw9AEdwa}Ie?K+{|D70IJ zwn}Jgg|YBk!w=J9Y4l%(uDKH&D9Y9GEmDX1~AHJ|a?%RkF%kD4RY^kn7&` zB~`~j+gd?m{|Rch&u31Efws4T>SSo&82gl%{h?O-sWDcC^67m(voYp=q}4r3_I$T@ z^KyscOLotWc^_-_o*5arzpV0B%B$Zp%mp#P`EA3zJm!0*)pxag?Co($*T(F-TJ17IrB8IGCccxBbVCgAd@JDQ z)>(SwomBI-7~sWLK#Lr)zgV_^*K4kg`Ce}IZIC_R9c|tnv%lJEzfZQj+t0o~W`C{K zzWw;Dc6!feJ{)uJZFTR64z=Vxuemekd!yC&OeFchX;bG;?>z0yGovLRt@!6+$bGHI zmm(Q;jmqxLd)vvP_Y=%lW1zQNL3`!M_h*=I#_aF5+TV#(YV)lLieb6mk9ps3_3oD) zJ{W6$60?8UYX3sI-z=8fKd{ZOW4`^ZzQgjv4{DRXi`hSJwf`8QIqFyq2h3k$fKOWi zf5@7yXBz|Q_R0({?AYNJ#n(q+eME0s8f<7=KHeM7as}UTR!<=rF`ilSCT6R z__`I~ivVJEk-k60v|@m7S^-IN#Qv(n6i-IDa;+!TljiB*Nf&X4MclU{?udx{ZmlQN zlSRmrBjUanaX&;~*#9W6J#fD4b|Am7(9Jwph2hKabpg+`>0rpVflgXN<=KKz0O8>mtn@H7+h+$1!= z&=RAVB>C;9#hw0h4)64=Bjnj2wDv+viGow*;Li&F8@$c)03pwVLhB&3^e8w(-unN6 zpY}XM$g@jmSwhQ>f^+1N&kFuKuf3kv33=WWTAt84M!}tA|If?P140W5tw3m@wVnf> zj|q7`6JAz2)BSguE3(8!5C=(c!D3d`rLXo<2rsH9{LJv~g>_ 
z!@R=@c}EIuywD~@aTB9B$H8vtwL+^CTD{OFul1hdZHN`#DMD+A;G(zBV)@p=Q2JD% zohr0xLYuzUd#3j+Lf!>JYZTgyXuO%xioWfez<-vB_ag7bguIsuZI;kZk9Igie)~<= zlZ({5(klshuNT@Jq0NnA=E?qtivF`Yye;0w zLR%!Xv!j@EE35NB+iph>-VNphhI)cMBw#uM^Q6k3bWR!6~WQ+)2bE8NiH+bFc{vj2|-?H&@^!$Ny7TqDmsdUd<(ss|3e@LG7R zE!{tz8~wxI~G9R^^-SkN9<+dH@r;hb)IzIU8k-^^(+S9FZ_WADhT^EV-Of=5sa7}jA_F?&pPpg=6Vr%n# z;QJ_C+2V@@-Y>LW#{+&63HYhdo@)*Gx$j=zEs=oFM+4p;uF0NRwS8EDzT&4Dtq~9T zj)p7O`40QO^&Rnj=lkCGgV0_S+RH+FRcNmXZLiSYSm*oE_ml5u-!HyjeZL89pU^%K z+Q&lsRA^rd?QrW7{TW+}m*kLNtJW;yhK5Q}Sh4$HTTtkG$6rp|oZyM8$fZ>eN_##SUPlU#4M#v~MPB+eI@On;~ zlm1f|P8HgFLi^II?xmeE$Czu(n=rDb^Yp0WtG;cB&C*#$)BMRJYep}=!Q*2K;e+QH+5FNhFcBD8P*O;{KK!xtrdC`$PCljrTy$h_&V z)2=D{4Z&|*VO-T3|7xLqdp!QNk@(AncI4mjuQLLMKN|nLX#6)%j(<}mzN3}?t;X$< z@V5!=hZf@wp&gw(r2l}CQ|C0w$$zy^S!=8hSFSehG}a02C!zhk+Sp*+CA43J_N!d^ z*QvRijcwt|HO3a>eq*c9eizywLi=-#@qn>C{%WE9CG#uYo?7c*El4!uO9e8IbH2@#P~T}xz2dj_}=)zc+U9I_$f-> zBXqCORiXQYZmctYQM|@)#_x((@e17xR}0+|x<6bUu5P|9PPwZ|qn)Xnjz}pkoHeI0 zq7IjQ(doWtgl~|!@#V2rb`;w)`W7fT7PDu}lY8Pp(5yQCPQuyzlS45)9fjU$ zwK>=vBJ_Nr2bvSxC}p04sBx@0PUs<_cW!pJRkE6wv{9U%qNu0AoFepMp_jxwkF>5rkrUm2$!!mN1FqM9t*nATqN{vLN9L~(pE`LIp19JFThf>N$5R< zUJ;9Mv3XepaEW=T(0dBK*J|@}^9rH&7J8ou*YvvM3$;94vC&*%u5>+bUgultTjF}f zywUZJ>ocME6?%UWw@K&&g|;OkC*mFy`XCXvTf}|QI{mR%C3?TsscJbsRmQP*CHjC? z=LWg?^?se=o1zn}z7&prb&O5=eX&U&+zQ@${56EDF7h$cZ*DiA2v=?}A2c5_A2uH` zA2lB{cbJb0eW=if34OTGM+kkS&_@ZqTIi!UkYhe+K1B$Rxx?H=DC`yb7@^mQxTE2? zC}ymPJKEgfQc|0ra4M^nUd`cTCAqoTrF3jc8q$BjgjsW^&pT)A)KmLToj+v3_*n~P zojSX*!RueVxVRvrxNJ#5#^U0V@}h!_Ze2^4EIGC-nh_;p?htx)Yw^Ej?vD_E+kD4- z*L=@>-~7P*(ELc~x~AlziPpPSqm0Uox3ph!F0j!sprjGw6MYJ9nyb5&1v(_89DXrSu>|D zoHxI2NdEz2r=B`|>Zy&ho6mA9mo!B-*;uct`HfR2&Y3!E?(ug@vB^Ga{uHikiJvIc zCtA#(h2C(yWPS@*tTul){}B2Vp`Us@NvHX@rT+V~v86>m8|zcE`z)UF&yUPaR$D9K zf8eetd|Gy&c^UtJ`z@c;*ONoYDksP4VO2y+EAFV! 
z7eyB4Y@wec;*K_Wvzx0@6@T+%sfsJE!J8gyxl@${Uu1m-oO|Zfxzih`4_VkaXF+pQ z1UWWU8Li|*aEVRx<}RE%Yi{HG1*7K9KJVDnTLZ%ttE!JThK4IzWs5b!8W&+c(i&w| zTcfQpR*f}Q=;sOje4#HE`UOH?BJ`y~Z(2_tAzrd3S+(R@_2KC4&{gPRp^I>h&@XE6 zW)JD#JS9!Bnn$K7e>7P$Xs6^@GwVmzjF~!jW@Ep3i{>sA`i0R`!J1{AUVBX5)HE(! zZJoiqliGWab)F;ii(~kC)|m}n-w9n4)rvKZ3)fg@S@R=@%Kx~UU+AEeH(6&};c(?$ z);ZR>)_K7ll<0Q)~0af8f&9Ya2PZ`pGK=bk}<)C5xWc?E1@Uzgj zv{=6i{r)J;ChJe@?{H;{^_S4Mw%7=L+oV}@7dFnHF?HIB9R0RtJHnMMwl4GsTI@DL z-#)%(-lF*@BNA*^1mP6=gDtjO=ntLPBv0(@PTREY6C7;6&>wld=Gbp<{C2XP(#j)M z=#QS5BRk#B30JPQ-L`IL+1WyWNazm;{V}2Mh!`V(J4KBTMgM=i`SctmwaE_IMd8ZT zcIOD2Cxrgc>IgE18$WB#)R~P7j*sB9yV~Wg#JUUp$>T1+U1|6H#{$`X?Ec}(HFlNV z*X}3ur-lBE(4SpH$R21%-slN^m(ZVYXx4L;3)1Zo_NWspZ&wTbxz@ln_Si^$p>{G%OYwQMliak~6FA4osp+_E;Vn0^0r`t3Cc_i4U*|UWHvd~{? zZgDGSbAFy;q}p@sv;GB}Z!Zw~ZlS-{JRwiXXtK|>7yk=!fxSfNdxidb>~I{naMtWu z3uiShh#Y>|ea`&nPoVtvh4xk9$_@5K_Qm!k_NDe^_T}~!_LV|^Q|SAI{+7_+7Wz9v ze^==53H|*I_SN<>a>%on+bisq*^!&{1EGH#_J(VO!zUcs!jU85j@HhOp3jk+t@%oi z(v;KS^+t~A{*9+DnmMyEdJ?~3-)7%VJNpiM71Qn2_L}C#L}hr!hhc<%Sm+1$*>~FO z?Dh5r+Szy68`x*P$8%!lbvv)D?22)8Nc)WuiMLlr5`Es=R`N{WR;qvU;~WzDH?bQ0-2OD&&|@BhRl3{pU!Lc$)9@D$}O=6T%HG zey7lXIo{jv_xKaTmFxXpzt3;@O~2*0{eGeUD)irk{=3ls5c;1&|4Zn9ulFbUll|@e zDYny}=I(2YJHLMmW^twQ+2&{Dt9$ zHU1)hvA;w(;)FvJ4*mGzI*(UGtXBTH^!|#7*WvijrEh9o`{QXk{r&ud!{z_|mCZlI zKa`MvxNx)+j`+xqlDn;mSMxr}!KEQ~Xo? 
zrwWH59Hwws!eI-C|4!>(f1`hf;`N^<^jm}@Q8<#r)xwd|?6Z~8KL45i`QgeI|5?J3 z)S`I9)y)fRCC}$S$A4bLe6Db`Zy_&S)4b7E(nk1~`okyoPQMV2)MI<6|Kf1PYX2qv zONAp%I5Jvi?h60a|J?2T%ly{}M+f0ZZyw@TGMfC-f5X2J&HfvOBU3oC>iW%_yI|hz z#@I%9_U3=;ZvSom+y8M^_*?vITG_1?j@;wy)<@WF@ZTjIdBTz3%I+S2@_qjre6xRx zaC8)oPR&Z9lA7w@?tl1Sh)4X73P(UVg3a9%m6XKjE3{#a=QU2Rnm_HdS!XxS-{aru zf3kU3p3<@D7I3zJ^ImX10L~rYd>))Hf^#=G_ki0JsK$YY4c8folY~MuBTIxN5*P4qOw!H5XiGf@?lBxfX)!9B`cvt|j0KgXu{2s|Hy=Uedn4&FHM>fmhy-ge+Mn!uY1-VWfM z3*K|Udm(tQ0`G0$T?^h#;N1@1C&2qGcwYqXYvBC=ydQ!00C+zE?>FE*48AJxjRapc z_{M;5EchmauK|4P!M6>3+rjq`_?jL8-_ziG77P=N3^205$N?h{j6yJq!MFg7E5Nu4 zjAdY43&xFL+ziH-VEhQi&tUut#_wQiU^>8@0OoWsXMlMcn5Toe0L(>TJ_JqXvtT|4 z=3X%00COLhZ-ejuyA0MU=IU(1lXg%9t-w(uqT2&1MKs_J|FChz`h#n zo4~#u?7P9<1or)4Zv%Tf*pGs}1MDZjeje;SVDAU}3$VWh`xo%L!Eb}VxC#6{z+Vae z-r%nS{}}MsgMSA2=YxMC_|FFax!^w^{4at34e%cT|0m%88vNgY|1kKEfd6Ok{|f%! z!T%>D>X7J!#9~P74vFI+@f1j$0*Ou2Ah8h==R@K`NEDEG5hPv$iI+j*6_9upBrb!* zn<4R5NW2{qS3%-xNL&kv>mczZNc;kl+>q1-lEy;P`H*x0BwYeYmqF5%kaRU9T@Ojk zkhBGwlD0z9W03SXB)twvZ$i>rkn|2Dy$4AjK+;E$bO4e*fuzqMDKgLzNctX<<009E zWE+wTA-NoqdqDDdNN#}SsgQg*B(H#E3CWuw`C&+Y6q5JRMDj~L;Ft9J^<~{ zfc6Wa{n^m|0cig?wBHHs--Gs_Li?|v{r8Y!KuR*Cbcd8tkWvpRr$9;rq)dU7Igm0B zQqF~x^C9H|NLdOgVMw_GQm$%(lx2`|Eu^e~lqVtO8%Rxo)FF^M4N}j9)cKIQ2vX01 z)bkkDbuL)z_- zb~mJLg0%Y~Z5yO*hqOl_Z3m=10cp=c+HOeu3etXprnFxm?N>)dUJmI4Abk*|4~6vM zkUk31M?-oeq|b%)Gif4y5u~33>E}WErI3C(q%VW?Yax9Fq~8GPH$wW&kiG%ZABFU1 zAbk&{zYghdL;Aar{ywB1fb>rw{WC~E3>oo|VM2xt8A*`Q9x{TE5rT|D$cWtk8Ksc1 z7&0z~j1`a}A>(GqxD_&Phm2K_u^uw+f{eQ%V-sX-gN%0|<44H&9Wph@bU`I|*`6f!ryOdn)A4gWR(q_iV^L z7jn;s+$P8skb4p2u7KRN5yK|PeFSnJgWM+}_i4y|7II&N+?OHuRmgn{a({r_KOpxn z$WtIM4)QF>^Fv+|)+1Uf<>&;)@H1d1R~3W2T= z=njEO2=s&1U`kp7tj>=8UlwP za0CKBK;S0`{0f0TAn-Q?RS4=3Yzx5z2)ZHYgP;Y$LXE7AearoJP77PumFOE z5G;XU83fBASOLLa5Uhe=e+UkuiQrHOj(}h_1ZyBT9)gn~SP#Jl2%ZYTMhKn;!80H@ z7lLO&a3KWGf#CTNTmr!`1TTW%r4YOVf>%TES_rO$;Pnu^5rVfs@OB8cKyWPt*Ed0M zBLp`=a5DtALU20-pMc<#5c~v!-$3v%6gZ&30|h=P2tq*@DCi0W^-$0V1v8=G5-7L^ 
z3YJ5`Jy7rf6g&t8uS3E6Q1Bsy91!wA$OoaO5(rg5s3(N#AT%99Ga$4ALN`O`RtW8a z&~6CrfzB3mPJ_%J44qZ=sEJdfts`_B2HnO&xAD+zK6G0Q-IhSN+o9Wf=yn%$+XLO+g>LUd zx4)shEtJPYc>u~wp}Y*rr$PB_nkb(Oa?qi^P9dw@z-LHY}&CvZO=>7zBe;&HO2;C1s_n)BqFVG_sdIX?H0rVITJsP0L zROoR*6ZE(QdRzuQ?u8x?LXU@`$H&m)AoMr{6-iK$2^HB;(GMzyL&Zp_I2S4eR9plV zcSFTCsMrn_??J_P+zA4AVip_dN5+|bJl zy~?3i74+%{y=FkKdC==D=ye@5^|}pu-2uIJLa!H~*GtgrTj=!*^!g2Y_k!Mop!X2y zeI4|^4SL@Jz4t@!ub}rq=+h1Q^npHoq0fcT=PKy44EnqWeLjUgpF>p%R8>G#PpG;I zs;+~o8)%~H091VqRo_70Ea)4Az9Hy48~QGSzUM&ShoSFN(DxbW`#1D!3;p7u-$>{; z0s2jXel5^%BlNom`h5-ket>>ILjN-8-wXQpf&NRN|E19Xa_GMc`tNRn{(E462?J7L zKnEBw9tJePfT=LxCK#|92CRhvAHaamVZfI#uoDa{hJmFpa1IPS8wQ>W10RQh&%wYK zU{E{^GGUMngQ{WBBp6f&gYJSs_rsuVFsSJ_7#s(Kbr?Jh29Jfo<6-bx7`zDv-v@&a z!r-GY_$L@r2t&$YNDmk?2Zo#tL(YXETVcp!FywI8DGZwh!{)=Vg)nRf4BG|6o`>OH7@iEnQ(*YHFkHa! zi(vSRF#L5G{w9n_gAsW!q7#fb6Gof|BNoGmEimE{81Wd4biqhJj7)-&4Ky+GG#Gh0 zjNAqzcfiOeU{qTe<%3ZsjG6$WrogCEVbm5F^$3i53`YG4)efj`3)RD*dMs3rhw61u zeIHbBf$Cpjv5+%|%f264bl_HTz(!0b^5OY#NL`6~>+cW9Puw z9WZtmjC~%)<-)k8&M>YB#$5s9R>HXJVBA|UZa<9s7{&)-d>0ts6~>+K-|3Td4gGYJY;-U!e9ksQnx26sXgn&INVt zp)M8bGNCRT>LL$>bpfa=fVx7cD~GzVP&XOs8ldh}sB46}v!U)hs9OwmOQB9c-78SH z59&UFy3e5Q8>ssh>b`@zqfqxd)cpnZ3e;;*?}YkdsIP$f2~a-;>YGl5`bMZf4eA#` z{W(y7G1OlQ^_N5al~8{*)L#Sj%c1@@sJ{d1S3~_;s9y*58=(GfsNV+l+oAqJsNV_o zPeJ`#Q2#E}e*pFSq5c!7{~YSSg8FZuss3B2{~qdpg!;c=GB7y_CTGIrY?#~=CJ%(k zgJJS?m^=q2&x6S~{9nuV9h?P~{g3~3D4}-H_rEWXYQPt_c`x-{<>%GJ%y+K$5Ws1 z)DoWhfu~mSR4z~D^Hd?Nub}nyw7!wn{b@af)-Tg~IjvXI`d3M<57GK0txwVVG_B8& zS&7UlWY#A$mCQ6U8KkIWCq97*OVGRKnn zKQgC~IgQNOWX>b=_xqpu9hoc0Tt(*3Wd1^C4w>u8+(hPXGV{qSB=b0#C&@fT=2+TKsw2Wk5-ZU0N# z$7tJ;w$IQui?&^9+l{u*(Y6=Kwu5M!r0t8eeS@}d(RMs-C(w2hZKu+925o24b{=gP z()LT*eofmYwEcm$t7yB2wufnZly)h!t3|syw7Y_K*VFDs+GW!28QLXi_ZscqmDKJ% z+Rdch=d}Bhb|-0fp7ufeTWS9v+TTt4w`u<&?MKpn9_^RX{#)9ANBiZpUrYNO+V7zK zZrbmo{eIdXqWw|Ym(czU?a%Two-WVRDM_Bbo~PUL^t(L0l&1^nP>l|0bZAV6W^`yl zhgNjBhz>W=;Wj$lL5Dl(a5o+9qr+2l$fU#5bm&Bf1Rc83;W;|=qQek6d`O3n=`fuR 
zGwG0=M~8)U_>vBb>9CXz-_hY`Ivk)w5gm@v;RGE{(Xk30tJAS29qZ7sJ{>#I@mV?! zpyMDqzD&ng>G%d6-=^dHbR0p)QFI(j#|d=&k&d|}I~LHfgpQ}_SVqV5bgE9LnsjPH zr*t~Cq|*g-x|mLv(&-91-9)GVqtnfFx`j@+(di#_`WKzL(&-I4&7spqI+gKEQ=Yky zXD;TMOL*ogp1DR+=P&8}HJz8x`CB@#pz{xO{)x`N(0MJL*U@<+owv|=JDqpYc`u#w z=zNgQN9bHY=VCe^r*jFN&(QfS2_(vsNFh;$M0FCiNYo|KAW0&PL=zI}BwCWVfW*Zl zE+ugVi9eCJhQ#$G{!HTkNZd@~RuXrR_!o))khq7${UjbD@m~^;lX!|m8xrkFbR^N4 zL>Cg>Nc13)>`kH{i2)>DATgN4%OqYS@g|9(B;FN0TPEvlWHlqJ1zD}gx`?blkaam(em#zpko7THpOEz#S(C|{M%GNS z=8!d?tk22%imY$QT1wV;WPMN8k7WH!)~{q`leM0#O=N8)YX@1o$=XNOezFdcb(E|^ zvXaNhIziScvP$Vvfi5X@xr8oP(d8Pt{D&?N(d7}kbf-&yx(uMpCv=%cml<^Vi7xBt zvVktebU8zpGP2%#d*PV3T&9kj| z_Hv%Rl4l?1+4el!foDha>;#^j$g@B4Y&OrXTJyYm;Aw938=bz|#4?UCrrRQVxe4d^!(eo90{*Rth=sAs^IrQ8{&mHtUPp``K zs!Fd*>2(dguA^5by`G_0f?hAu>kWFnMXyElT28O;>2-i!Mf5sG@A~v^M(^e%d*4X! zTj+foy}Qu6H@*AP`yG0JMDNk`o=fjV^!|q48|l4^-h1d%g+6uZQ=dLp(C2#k+(@6t z=+llqPt)gh`V6Pf`}A2rpT+c9N}nzC`91&NCzrnHTZO*W==(?d-bmk@==%tLGwIuw zzC-By7JY}(cOiY3(03Vq_tEz#eGBMUlYR~9*NA@qq~Cq?dw_m@=$EA5VETPRziITF zLBCbMgE{nDPydSauSx&f^uM0|H`D(X`uCv!0QwK2{{;Hap#LoTXVZTx{kQXcMV_z8 z^R;>YCZ4~I=kMV8Hay>%=d*bJJ)R%K^W%7aCC{(n`D8ZFmocCs11d4#Vg~$)0ar8N zNd|OaKqm&g!+?(%Fq#2N8SoX97OltJkX`a6UE&7l7g`JX;86+>7nkzlkG%L3gHsvYoWU&_+>OEg82mhg zzi05T3|`BS6o%AdNF9dU$B@U840(bf;}|lDAyXK#k0D1HQou`B^U_~<>94%>3NH=g zrFVJhS6b%u} zw;J-+-MsY(Z#~LepGbOZ8gI?ut>e6PmbcFH_LIEbfww#H_AK81g15h7XfU)YL#s3N zA%;H1&`gGY$k2}&I-a4$3_ZipGKSs8u)7#`55qoU*e48|z_6nXJHfD$B=7u%cW&pM zfAG$Syz?>djOU#_ymOd$j`HpwdG|)%y@_|<8IYwgS&5ZmPBkyG7tBibyk;568 z&B(2c+|Ea7eAJSUTJcdkKFZ>wu6#6`kG|xiMU1Mzs2Y++)ne2WjCz_;9T_#5QF9nI zk5L7TDq+-VMmJ{k#f<&~qc3IjWsJU&(Kj*r4o3fr(f?ueJ&eAe(GM{?lhN%M-GR~1 zFglCT&ocUBMz3P@sU%}AX3T#X(~~hTGG+*4USZ7ZjCqSO!x-}^W2Q4^CS&F?W+7v~ zWX$)B`GGO38MB5lIgHuFn5~T2!I&eAImejujIGSr28?aU*rtqa&e-3h-`I;8dnseD zVC?mbeSon~F!m|Nwqb00#`a=tU&i)l?7-iNjD4N4Z!-25#%^TnKE~!T_9$Zu8GDSe zCm37C*z=4l$GD1&tIoK3jJuKKxQ7_mnsJ>N*O_r$7}t$)0~z-MZ|+{> zGZ=p%<1b-lsCpYCEpMJCi_LTx5o$%Gr2@Mk7G 
z%!JlVXv2hoOn8L}uQ6dZ6TW1^A|`BOLM{{b^H~KxtHEcrNPhNbKD(9AZs)Vt`0QOi zdymhS@YxT1wu;ZrF|iU8t1$6$CSJ$H8ruNk|a~EV#+m4 zd4efVGo>R_hB0L%Q${gmAybwxWf@ZrFr|no$Cz4!si{m&W9s!xy_u=EFg25@&oDK? z)Hj&=9#cPH>O7`?&D6zA-O1DgOidnQT6v~bXIf3BUC*?enRW})+B2;S)1GD8Fs6-U z+9;+iXWD9}{mQglrsXrOkm>16znJNNVETWU{t(k2VR}EN4`%vHOdreiiAAp2XlHc=VRtf zX3kXRtYFSB%vr;nlgv5K++gmd%)N%W*D<#Zb2~FPi@C|SnEL^9M=*C8b5}91m^9D2TP3FDLyq}r3o_QOY-+=k)%x}T`JDL9=^B-n@ zU*^Ba{2|Q$jQKN3&Y#WvpP0Xn`5TyD$ox~xKf{8SEVz^fm$Tq*7Cgd&M_KS93*KPC zTP&Exf-hL`6$`erAddwHSXh~bby!%Bg*UM9?<~BPg|D*k9TpB};dV(2^H_L*&oANg ztN8pHK2P%b>wNwupZ~-c;FU$o+j%lYC;zIdK5UgC>a_+lMjZ0C!eeEA2yyqYhs z<;w(L_Tcex$Tv&)W*Li9SX_(6by)lWi=SZeQ!M_7#h|6$49EP0$1EF%bT&h1IwRfd3Tn7$?|VmzMSQ! zSy7G^6#SPFs#UD|nV;(MQ&WCQ z=cmW{sXae+;HPQ)G@qXq@^eLguF21}`ME1U_u=P${G83tTlslAtDCd>5>{Wz>Yl6~ z$m$nZ{VmDWKe2i>ztrNFG=6E!FOTs{JAQeZUncO&41Sr#FNgW%H-0(6uh;SG-}v?K z{5p_dU*Xr+_;n?}uHn~g*3@H7Q`V%j<{8#J$C{q3S<0FpS@V;mwbfW#pS7v1eSo!3 zu=Xj|zQEeoSo;QR-)8MQto?|!qgneoYnQY3d)BUG?GLP7&)SV-Ur6>PWM4-1m1JK< z_O)c+K=w^!|CQ{&lYJZ6|45SkZ?f+q`(CmiAp2pmA0_(Y zC%Y%veaP-l_CT_KKWhltuaNyZ*>90OjO^iLe?ay~viFmHi0q?e7yiyUM)nD^Pmx_p z_BnEboC@SrBBv@jHOQ$=PCar`$!SDRQ*xSCf&PsCD zkdw{2hOE1Qbr-Sj53IY4b=R`)de*(cx)H4Vh;?IFH;#3aST}`ryIGenX&~+7JnR3$`hT(hPS)Se`g>XbDC-|*{d=tcAL~D1{b#J7#QNE+pUe6KtUu2BldM0@ z`cgJjU_%NU{?3NG*>Eo#9$>>mY1}XV?Q=N&&EM)OtSHHHonQmWo-O~jceGL!^ZV&+`-0OY`TC=SFtI1 z4V$iK(~WGpg-y4y=}k6`WYZ`%jb+ovY?{obscbsLrju+s#imj=on>`7B$ivZXp(YO$pbTN<;aDO)nx z(uFP0vgJ9p^kmCGw!FZW@7S`2E!k{Y&z6mB*~OMUY)xfrE4E(9)=SuWDO;~$>ve2> zo~^I3^$kf|-)8GDwvJ@$D7LO;>khW=V(VVE=CU=Pt%Yp6kZo79?OL|oz_vfL?N+wk z&bIz+dzEdkv+XUm4Q1O1wtd95HEi3?ww-L-!?t~FJIc0#B-<}$`?YMpp6!2T`(N08 zJKO)k_VH|=$@bZ7pU3tEY+uawrED)@M+J7Iu%ik)sjA#?BAf`6)X;W9MXc zPG#pjb}nG&adri}%CjqlU6t8Yhh6p9)t+75+0}zxz1h{5T`#h02+3V5*p)bozKGq|u=_f8-^lKp*nJzj?_l@y?0${iZ?OA4c7Mq3kJvqi-5;}iGP|d- zdnUW*uzNnce`WUx_LOJOW$d|D(w9p6=}F&z^zo zNwQ}MdtPDBc=r6jo(=3d%ia|B)@E-*_Fll=i`e@|_Fm22>)3lEd;h}T+u8e1_THIf z@7?UZkG+qvwWA6*>eUZIyu=j2DzQf-4*!v-SKW6VI?EQ?rli53sz4O_-jJ-dx 
zcN2TJuy-$e^VoZky+_zvz}^z}o?-7<_F-SLJp0nwcL)0(WZwYxz0AI$?E8>?pR(^W z_RVD99QMs;-{+TyhtYyO`W%87Lj|J+%j^{lUI(s zisaQKuMTAfGvsBF_bhpR z$$K$L-W%kNAnzmcJ|XWj@+Olvjl7xUeNNt2rG#4p-oCB@S2R@P!<{i^Gp|cnF8z;_yct{*=SB zIXsuci#WWP!^=3loWm$9aj(p0I#T?1y$T5yK=4dO9Ue3|$IC?8bZ|CUU9KDaD z4|4Pojy}fGb{y@%(PucC#nER;9_`D~7diSSM@MpW6h}Yh=tPcA;plXZ&f@4799_iG z#T;G6(d8Un&C$&q-OJHJjuvzD3`ftBkNooFr;uNZ{JP{fAU}=#Cgfj0{#BClZzBJn zTa3htrczZ5)9!BZ5pp`bklT`1^AK@SRgQ_zos!4wRmU=#(DD40S)axMi6DENYc zMHDQiU?l~sC|FIw8VYhK*haxY3Vx#yh2rhyq!iE&Kps*E%7g6{J3NNSd zItp*0@IMqjO5x)awx_To$->SQcA>Bvh5aZTK;a7%4yN#B3WriSn!?X0oJ-++3csOn zDTUur_&tR`QkYHQdI~pDxRt^k6z-?6h{7`zRida0MRh4^Kv5b+O(YeiQ*<#!mr`^E zMSr5`8jAjpqJLBL5Jhb%YEMxYin>wMgQDIP^`mGoMK4qI8bxnXG?bzd6#b8)DHJWF z=nIOLQM8<*l@zU_Xf;I}k`!&GXd6X4DcVEPVTz7ZbdKU`6xX0QmEuMeH>J2a#TgV| zM)8#tUq$h?6yHGc-zmPE;{Q_oG{qe$?nZGBihEPskKzFozfAFKzuUe^@lcB2rFay@ zpHe)V;<*$rqIfaI%P3w>@k)x>7?;$FUnZb`!^L%jVclj_u~yK91#atbk)h{ML)#2J_oX{Prrpz0PmL`R#pv zTgz`d_-z-z?d7*ze#_^#LXHn0dHi*bzsd2T9DkSN?{jSuL`eoE7f^CBC6`k2 z7fSv{$t{%JPRTzgxtEeBDQQhfTS}g$q*IcT1SLHw=}k#LN(NB!HYM*+@*X80QZkZ~ z@s!M@WHu$=P_mSg?DgnUY^A*+$7uO7>8aOUVIB4pWj($r(z@C^^rm;8gN@ zPTj_-J2>?(PTl!C$f>tEHH=fkIrToLe&o~!PHp1UR!(i_bRABobGikmTXFhAPCw4+ zPMq${=`NgpmeUhBJ%`itIK7b5Uvm0uPA?&OdMl^*aXOFF2RVI&GdNR^GtD^DoHH#r z(~2_}apn)4xt=q3a^@+{wBbxU&SY_>FJ}gF=5@{t=gc(D%;n5aoY~Epe9jbd<``#A zaORYxGo_R^q_hR48I)c~=_Qo@mC~Cj{X3<%QTh)`|4r#5ls-o36O=wfX@b(Oly;}I zC#8KTO;S3T(jk<-L+NlzKcI9ZrK2ewN9iO=r~Ga>ozhv9E~oT+N>@_4iqh4TuAy{0 zr3I80Q+k}zla!vPtP*8aDXT$QDrFZ?b{S=DWyrR)jH zT2uCWp}wp)Wql~?PualVLCW5y>>bL6Q}!`s<0+d!*(A!QQZ|FKg_M0k*&@moQm*m_K&b`99S)AL>`O2KH z!})rgZ^ZeFIDZM}|H%2PIe#7JZ{+;VoWGUxcX0k+oWGy*4<$L@n)6wl@5=dJobSu| z=Q%%!^DlG$HO{}u`JtR2!TE8VpTzm8qFfbGE>)CE6Xh-xfG<*pUw zZV=^e7v=sb%KclEyGxY2SCqT|_x+$K*H4riC(8XG$`y-pXGHn(qI@+`zP>2mP?T>W z%3mzXUoOhuEXv;^%HJu<-zCc5E6U$5%C{Ef+l(CDBbp~)7Uka;ONR!0kL!{IdDGfwQ znn+0(DHn^BKS@T)Uq#BzBIS0G@=uX+r%1V1q&y%}9u_H2h?J*AN>`Ecyhs@+QU;5Z zmqp5JBIRw7@{UM(Po#_yDW8dyxgur0NcmEvd@WLzh?M0bWu-`2B~o%k$~KX5Fey?F 
ziI;YsJv5D-XkjKipnQMI6}BlBhaGRGluWt`Jo>iK_cVwR)o3MWWinqS`Z}S|3p@DXP6Kstps>hKp(+h-xE6 zwb8#TCWvYiMYU<7+DuVxj;OX+R9h;ltq|3I5Y>JX)z*k=IilJIQEjKFc0^Rm7uAl5 zY9~asQ=-~gQJtt>UR19xsy7nVn~3U{i|T(A)vp%SuM^d86xIJCswe*@s^222-!7^@ zEUG^$sy`vBw-(jgit0~`>YYUOgs9$CR39p;&k@yEi0ZkbMtM==5>ewnqDE&?qmQW3 zPth#I+~ zW{RlUNYwnRsQI|4`GTl9QPi9zYR(Zg=Zc#1M9r^7&BdbT5>fMKQS%p3bGxXyOVr#e zYUYWW2Sv>zqGo}pSuAQ1waQ6Gt%{;nB~h!Ys8vJMswHaG6}4^@wVo8U28&t~M6F*$ zt$b0tzNmeXsQpJ#`~O7kn?>z^iP{f}+K-Cb?M3Y_qINe?yPv52wy6ESs6AQKo+oN= z5ViM;+PR{3NmA54BWj-&b%;9UMV%B;r;4akUDT;1>eLl=E);bx5p^yTb*>b3t`c>w z6?JY9b#4-M{wnJHUDSC()JcdsFNiwti8`N%I$w!8zlb`AM4fY@ZUa#_UDRzU>LxQp z-OEJX8%5olMcq3@-Frmc`$gRcMBOJu-KRv|)}n4FQ8yv#W{JAJMBTokZa-1?6;by! zQFoZAJ6zQLK-3*6>V7Qhej@5l5OpVsy5EYrn?>DIqFzN&uaZdCt1ar)6ZKL>y+)#5 zQ&I0iQSTB_?=n&EN>T4BQSV=(UUyM1De4Uu^*$E$#*2CrM7>F(-c(Ug^ZxeiQXhh= zoDmJqiqs^LT3)2q7O8bbY6Fp)CQ_S-)O3-0nMl1tq+Tyl|147fPo&-~Qg0Qh{}HM8 zh}8Q<>O&&+zaq7bNKJ^;z9O~1NPR)14i>2|i_|wn>f0jq9g#Xhq)rgY)QKW>ib$O< zQfGQ0fmN2DGUsYgU=fk-VD4I7Gve-{lq zh=x5x!(O6cU(xV+(QuGx_@ZbyR5W}?G<;Vy94;9Rr-+6NMZ+&d!$qRuV$pD!Xt-Q7 zTrC>@DjIGO4L6I1+eE{iqTvzIFkdu0CK{d)4Nr-NXGI#3R$io~inQh;?Q)TJrAWI* zq+Ku4{w&h|Cem&ZX}62C2a+P~36b`UNK1&cZX&IRNb4=qo)>9@MB0lY?R}9pR-{c8 zY12j8HzIARNc&EteJ|2}6lp(;v|mM9wn$qq((**wL6LSuq~(jWLXmb%q@56Hr$kz* zXp~G5jhc%_SBOS8ibnT{M$d^xuZTt;iAGaJqZOjjPSNN$(deXTbV@Y(oqSF-ZYUbx zBpUxiG`?FjeoQoOCmMGUjh_*Xvqa;rqVW*X_(Re7Gtu}9(RiI`yip_@ZxM~Ri^jV| z<2|BDJ<+78Xp%0PTqv4cBAQ$#np`QG+#s6VDw^COn*2*NxnDHtE}HZfO+FA!#)&55 zMUx4l$qdnCwrDa>H2Fp}StXkMESmf(nq-S6TSb%YlF_uDXxdaXy;?NAK{UNvG<`%g z9W0u@Dw@s}O@9(iSBs{3Y$0hiJM>G~Fwj=82{SqFI_~)=V_}r)YMcXx2wG z8z7o}BAU$-&E|+^^F_1IMYFF&vu~23S*~bSESmi$nw=EQPK#z`qS<+oUO}YS5$W|r zdSj8^Or*CE>8(Wir6Tq(~nn(#MMQ|B3V&B7K%fUo6s> ziS*?neWgfGt`h01Mfw_%o+Hxti1b{Men6xj7U}sSy-1}0Celxe=H*56bkY1$(fn_s z`QxH_KhgYU(R{pUzECv(QZ)ZsG+!c`e=C|V7tL3T=D&*O8$|QXqWLz_e5Xh@KOmYP z7R~cT^WPnu7R}2 zk7#j^Xz^ds;&IX9Dbb>hXwhD@=ptJ57cE{EE#4C?z7{Qhl#CWXi59av0Jn#5iKi-mMNlT9nrGBXxUJ-Y%E$f6D?bamY0c^SBjQbiI&%jme-4xe-BI9$B@s-H Date: Mon, 12 Nov 
2012 17:27:39 -0800 Subject: [PATCH 002/136] Oops - texture load command went missing in a merge --- main.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/main.cpp b/main.cpp index 305fdd2d92..c791e278ac 100644 --- a/main.cpp +++ b/main.cpp @@ -578,7 +578,9 @@ void display(void) glTranslatef(location[0], location[1], location[2]); /* Draw Point Sprites */ - + + load_png_as_texture(texture_filename); + //glActiveTexture(GL_TEXTURE0); glEnable( GL_TEXTURE_2D ); From 7dbac3fbc95e89971020fd5b8f5aac6e4f13175e Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Mon, 12 Nov 2012 17:31:07 -0800 Subject: [PATCH 003/136] Work in progress on field/ball interaction --- .../UserInterfaceState.xcuserstate | Bin 98898 -> 99083 bytes main.cpp | 14 ++--- particle.cpp | 52 +++++++++++++----- particle.h | 1 + 4 files changed, 46 insertions(+), 21 deletions(-) diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index c6b054ad3dc6ce046093b101dc83ddfbf9e020f3..7f7bb244fffdb9c9c784aa06962122e08f953684 100644 GIT binary patch delta 30062 zcmZ^p2Ut{B)5m`cd)JEWt$_4y0V$%QV(%TrN?&zB6BMP`5O>i`|9KK+;e8;KmVCCch0$IJ-gJhX1Qg9;roU0<;I}4 zrd}KS)dz=~B26)-SW~L0lPTTQ-PF_6$JF06$TZY6!eug5o8%)7lb?^t>RRX9$5eCV z4PS>jpvb@2f13Yv|2zEe^e^$B;XmJhfq#|%LYKYiD~==9qt;{Ahpo?ApR+!1eZhLh zde(Z*`lj`g^|JLN>&Mos)@#=9tk!{v$cBsp8ok&ESN@^raUo+Hndm&hyR)$)3Iqr64lCfCUK$a@{~ zKKY=0NPb8@EFYJjl%JBHm0y%!l26HR$fxBu<@e8lJ>hAX3#amqv`Uzw~-Q)Vc$l}e>b zaVX1_mCAZ$ld?^5>`>~I-AcW3uX3Mqzw&_cpz^5lnDV&tgz}v7n)15xhH_dtqnuUF zDIX{ol@FCm%2&$Q$~VeY+TGt`;t9CfZbPj#w7U92uqm#S;k zb?SO`tLj#_sduY&>Tb1OJ)|C0kE@TXPpQwUFQ})~SJl(%8TBpoZS}l*LA|P8Q@>NM ztKX|Xs6VPdsXuEiw3eEm)=Kl&teUJTnySTXZM3#pJFUHzpe1S@G)I!wMeCyt(}rtf zwDH;mEl11O3be`E6s=ghLz}6UX>+wI&7m#PR%xrX4cbO+hgPH2YCE;v+EMM8_ONzb zdqjIwdrW&=dqR6wdr5mmdtE!Doz>pfKGHtcKG8naKGQzezSgd4*R(&izjSny?xU3G 
zzIrpgxkI<=vL2uZ>Y;k99;e6aZS=N!ik_->)Vt_C^uGE~eV9I6AFGek$LqPeO`opc zq2H;O>lOM+eU-jiU!$+p*Xir^yYvnEZoOXLqwm%C=?(gR{eXU-{-FMh{;d9-{=EKz zeo}u?e@Q>3zp9_s&+6~!@9I}{$5s8B{+)hZ|6cz=|55)*|5^V<|4skP@G&H#snNpF z4Z{d90*x3W)`&CWjW$LHBgyD!q#50eo<^oI+8ASG8Douc#&~0bG11633JkkZWRw_X zM!7NHa2RWhwZ=MQi?P+HF?JaTjeCvzjQfov#&P2j zNgnJTE|Hh)W9se7d@#<()X%jmDAqL4Wy&AqV;bVBbA-Dtt=C+avLshnrs887?&`B` zu#ah!>r#IBkw02bumqZ{KEA{e=VLK>LaOnGRO=1t{jOGL_gyzh`>4+D`+YPY-N*0= z@Coz@@@ef8?A+=r$rNH0B~7%x4ac-O9=7$2W@uFR^A zK0b-A(a}Rq$*$Vd(XOYXo0>YhF6D4WS?VmiE%lZ? zmc5pJmIljyVLB;HFACF3!gNZQUKXZTgy~gbdQF&K7p6CEMR0Fp1P^#4cA1}-%VfGQe*M-?Py3fOz4{frqTa#OrlHN5n zAtI$y*K}8(kM*Qnf15L7kAI$jzJGy#p)gCrY!T)r!rXL^|78CuB>LNhxtTCG_X4$W z-F10^>#vWa+LroPcvL~E!p_ZQ{?UOodo zfxWnA=vVLMR;6_6+$A9*C9P|6^`xrQwB)peh~(;XG^05!Fs#wmHvfb0kzrq+@%?W! zTuI*+6&Ki}Ux{+`}-dU2cFs*dStDc(eO{fCzW)(8GqI;{^{50Pj+EX;F+ zd9IiKJl7B3w()($`lM^)rNBPcr>swtXnjVQ7YK8eS7D)d;SM$Z>UvI9YNxb>h?JC0 zovX8}Qc^mnB}8;g`-kTjt*?4edRbqyzD}a`v@kCcW``HW=|OpHhxSOG+>HdfR%zC9Vdk?^{10(fXk9Y*JguDy4KEDkpo=fYM>?u%0VQ`!NR;rm|b3l&93}w@xGCAT%$s~+=fKC zoiJ||X17;io9pPc_DRWdr~m1-v)qM5xvMa57v>#an;KWE?_zy>$^9Ea2gn0Slm`p* zPGP>stFX&8`Mda}k#g4m1RX1nBT=3p%)5oT-fOeR^~iVaeDmbV9?(|u6nSdnOS4ay z8(b}}N433Ep5=l2FYm`5dA3~EsC__~54y%*j~YH-UgXT!BQKDv7~?wi zV}|SZADU}*a=kO7PTnod$Li!g!u)W7HOTdl7BsMNC;Lp^?<}a34+!(|H~ETYd?Vj0 z-zVQMKR{f8)fm!$!05{9Q|=sIT6$-ut=K-Ya>itPnJ_;p%#R84BhD<>tRL1Y(S446 zd+M}vtbUd|xKlnNA9eNkDO{?TkI4_ay8qNidsKehL*g-Eexgo(LYSYtNh0wUi4z_Y z&j|BVw@ExF-!H#FoG_p8`g+=#<(mEdQdjoRs`iTfng{MxVSctweqETKy9wu7^m9XzXkhtRl2!u{MKFTq4e@_ z>nY5a>y+NY{LxKrj$7RNd$7{TBWP5B^4B{{G+iTa@-nfEWJ~4a zM|oN~p**8JD{iIAGQ z-uP>$<1OXA|E76fxuCo+q~=0uA*7bky{&lvjOi9J$yQlWI>T0B>vG%ON6Kga4dip> z3*}28wGxuQkgUgcvLF@*%%)oC94B9DD1ul;oO4k2|GQWtm2rlzRiS!(&e z6e`q8A$1i}w?>2oYPCn9N?jLh31`US0;l%hgr?Qdq675mFx^^=(Ah zpt?K?8$BHQdpT@Yw+Lx~kOq1=ICiKzJqrJb;vRLEkOm8BNObR|8@%Ni-Fw9>??Yp6 zct+jp?0!()r#AS#r5;cZ`aR=!O1)owKz&e1!-OFmeD@09V?xRj(l{Ya zxV^H68?!y!Niz>*5yk4mjZ5}uqh#dmC42JkB?~;!D9pSqeBN_N>g+uv9Xi|0mHd8l 
zO?|OZYs_t}S3D4{r`cvs?J#B5tlKnRZ`2&?j%;B{ai3^rlKsv#PK|d`VD0V2eWgZo zhc!1fZ~AVdXrep0xoOj;AEim^2kK{@H{hcBp?XQZtbU|^tbU??Dx_>7~H*omSy{y};Vj?F=#nx!r!I zSohN{O;0%jhx8vXVfxIerE|yHCik_K4JjW#y?pxQ68qonM!li=c&PlU{-*w}{-OS< z{-vRrgfvA+Q-x#~Qjw5~g)~h_(}i@$e$AH{;xvoaL~BZn)?7$;Iz3<5(kUUG7M2vJ zRai1St$M)J=@qG^Wqr!*wgRhlNdEy@(@N)#w9T1bY^x|OE3g`U%FCyhSJ-A&cr;7P z%7@$LmsVD|XSXsfbL0%^KVYnF@^IT^d&%EaHC+pHX6)AtEkFy@g0$9Juoj|)3aLa$ zGlVo#NTouWC8XIxDic!qel1*!@RE(zVzgLK)+&UwvXSjUA)OG?Gs5?Jc8S;T*fP7l z@gO^3>U6iu-!vztz-sky?r)!5SzK)QM)IbXtfkOQOVv6uRqLd6cH09?!y_u42x*0o zmcFf}YhAT&T6dah8CnlhjMmu{qV;m02{4TcnIoj7LR#+REv>KCPwP)JZGbjV8>9`^ zhPdMt)3}ySA?tC&8e#O^6pZ+F%vYLm9ArbypxO1G+}@DaC+^Sl<-HJ9dgrq^qmwJq9KA*~bAdLi9auWchv+b*OH zo;u&}-fJ~YE!^edRwty5w~P&Kk9MCkqe0uN?b8~x{n`QTpmwj2HVMfkq|HLwBBZTC zatmo&gLc36fcBtvNE@de){Y42ZXsV!BU z?Gn_Bcq0sOplQmJrc7y;Jq=OBwCD_KP!PulBw6gZ88LllHTa z?iJE~Lb_i_4+!bOz1j`!SM4{GRr^CohlKQykdAoHLdOc+if+o3bxCjH%&5~XLOSfZ zQOtCg>!vtaZ>hKPO#2DxXdQ9REcZd(6sqW|u6w36Aw68D8$vqnnjIdTrnh!>->nDh zAwqgYNKgKqQaxOca;ERrBlJiiJu0NfcI(l4jF27|(i5)VcSO2}7^Vn^-d^wUFRdg! 
zSx8R_>FJz)r8CP*OYELz8{K=(A@7!Z3*XQ?>7AV!yKaBX=w0>hxA|oV>Dk6IdPeW% zVc1*mBc$hqbn+HLOTE87deFa|57vhW>3Jc&FzIHqFD;AiUA5~!T({Im>ZAW8f1}*g|deOhIi}h(j zdR0iTWk&a|>isV&GxS;iPrWS3tN%uH@6w3>P&f2By2F{VU!SYb)9338^eTO!Uac>3 zk8W*h?eEluzF1$PFV&an%k>q<7qm8onMQ2Vw>vW$beFza-=c5T-TF5DZXul)(gh*C zFQgBIbWunj3h7bQ@kSF!0h|^Cvvpn=V3+Zbi zedB3?R|>4r?)*@b;T{=k`or-u%}g=+E4d@H#@J>S+xwMP&a4p9x8A0$zox&Qee*jv z%U-cte}mG0jL+(g#%F|dwNd|^{$_zy{`&*hdufnmuc+7G(%<%eUHm_h`{giGdf<8e z6KBQ&{eu3!{(*i`|4_fAU)Dd;KX$hcH--Curhl$~L8AVZ{xymEx9-8=rf(c6U8_B9 zkQVw4{Z}FVv8$26@A@CPe>?HI{$2l*(s3iRvg{Q%^$k;_zV{b5!+a~Z`VFJW-va%H z(cCk*Rj`KPXUNWsdZU%$Z&-yz^%WMgut@dV4dOhflS9rd_sIxTK~ifY+?lb*2sT2D zP$NuOng~l%VQD5T&G#4)BpOjhw6L`Bl#Qj8u=u-cBTWJBqDWKMHvXP-mc7J2rNTbd zbE3KByQR_2NN}dt8SRCoWu1}eel^k*?wewyx_8Bzq8%-b&PMueaaUon-m)}|45P$uC<s3E<1#!?JbdE z>@@EA&*j){?DJGry|KsGD=h7WB|%sc>q#{B8wb4k?jS751y;8u*0eC}LF1v@>wH*P zl5PVZGamL-L+r4}j3@qX#Kx1tl6td;7$=y2lOXN~8CrK7NP7M8BJ%v%~KjZ^>G zUX7QHSKQV(Q-XflIQK8no5ox2(Q&4D{k-wPzeE>}58dnHOi`o$<1|}f4XCP0^!(7) z#k0kyq^2h)M0Dw#a`O%5GvhO7L7jS0NZH zAN;DouNwR|gWp#0+XjBy!LJtlc7b0#`0WF~1K@Wb_&o@Ihr#a@_I$^F2Cc4xqty@4>L+OR7xA2;QtQzzX$#o!2bjA{}B8yga60i{{#5{1pdE(|F7Wx zJNW+vRv)lRU~LLk9S7D(u(kzjd$1;gH3_WgVC@Um{$Sk<)>^Rc2kS#%eGaTIgY_I( z-v{fbVEr1b*TDJ{$i5&;AU6TI8ORFA8py>USAaYRZce-$DHoG(htK%?~tx&}7h5 z&~(rOKpO;F8EAE&odzAyLqKl}dVA12fSwF`N6w^5bz)bJOlwpAm9W9JPQHOL%>N0cnJbthJaTg;5!KPgTOQh%!WWG z1nz>s-4J*$1Ul}Az(Wvt7y=)Mz$YQ_9SD360zZPlPayCH1pWqre?Z`05M+WNUkH*Q zs0jo$gP;}=++kz5uO1f!3cvunB^jL$E&t2SRW<1owjA z0tl{z;KdNU6oQvQ@Nx)tLGTs`-id?YT@bt*g7-l1J_vpgf**q5BM^KHf{#P+=MbVm zNFN9(g^*PcvK2zMLC6jWsfCbT5V9LW9)^&oA>KG>3c^N1SQdnhgRlt@HVML3LD*3U`xwF% z2=5Ew`4E05gwKHRS>OmSgYZfSp9|q@Abc}~Z-ww35MB%6_dxi45dHv!AA#^=5PlrO zpMdbEAp8V`zYgJ-A^a-{{~5w>K=_~F`4Ecmfrus$(F`J5K!gSn$q>;MBD&)sq9;W3 zhKSJ+kp&UsAYuYUOoE6yh&Tul$06cTh&TZe&qBoW5OESBUW14?AmR)}oP&t-5b+m8 z218^dM7D#-1c*$6$P|d|36Z@aas)(Shh|Ge>aS%BHA}2v)5kyXd$U7ji1R`fb z&(M9qY#SrAnQQ56t12cqUd)B=cF z2vLh5%CQ2XRzcKWhYO1kq<8`aDFx579qEj2U7q5Yq%=nnH{YF#!-01u-$;h>3%kHW1SeViF*x3&eDV 
znC=kM17dnXOkap8gP2-~c@<)QgV^>EI|yROLhN{modmHt5StINg%Dc-vDFakfY_xF zyBuOyLhKfZbwg|o#CqQU*j*627h)SA_5j2lhuD`P_8i1sg4mBB_H&5+5@NrG*lQ4b z9b$ig*gqgH2;!n3E(YS_A+9aNb%VGJi0cV)y&9}4l|5FZ8cF%X{u@f{()H^lda`2G+-5aI_z z{7{G=4)K#9J_q9SAie4B}(!ApRwYzXEMEXwwzi^o2J4q0InjGZNZl zy30~bZ%93$U9aPPQ%xOx9G^n_FQEN*(EfW!&>! zA;Ap^cSFK)NH_rr&qBgQNcbEQzJx>x5?eu{6%x}Su@5BngT$$jI0F((A#oiz61PI) zHb{I95?_VH*CFv2=wN~lzR+PHbQlF4Mni{c=&&3*tb`6vLWh&k;U(zsHFWp^I{XAl zVUQFLNo^r%1SE}zq=}HU0+KdB(k4iH6q25Wq~~#vbPdTw0Z2IkDW@Ri6-fCUQm#SDbx3UusnL)c3#lU@bv&d_gwzU1tp?Ar zBlR9g-4Ce;A@vNTo`=-;A@yhI2z2y;juFtYEp%)T9fv^2G0<@=baa$M$A!>w5p=A9 zj(eeF19W@|I-Y@!=b+=a(D5he_zR>(Kw4W!YY%B7AZj==2wKZVH{7L+2RioCuwhpz{RiTnL>fL+2{!yc9Ywht3Vq`4DtI44vPG z&L2YO%h1IK2VGh~7eD9{4_%U>ODc3330)>Ymr2m28oDfpE-RtS9_VsEba@cEoQ5v% zL6-}V?hENHA-xr(r$Bl)NY8-uTu8S=dNHJLf%Kh_z6;WyhxFGV{S61CUxoBvApKY9 z8US4*plcL#&4RAE&@~^rE`_e^pzB@G^#pW11zlf(uHQjdkKOOkEgZVFfo|=fTNZT7 zg>L!KZ6$Qu2;E%J?E&a^9J)OU-7Y~l$CuFUYv`^)_fY5_4&8@B_bli>4!T!D_eId% z3EdAu_ao5#7M~1p{7z0dK;9w_%_^3=D*Utzlp`44eW3?J)2j7`Pt>9)y9{ zVBifH_!|t$fI$Ob&>$GJ7zV9@LF-`91sL=R4EhWPC&1uNFt`g0t_H{8s@j$bJ}d5QcmTL$1J(YcRAU49$R{Jz?k^80vtb z0){>hLtlfTZ@@5r7#0Y_TEnnx7&Zll*>Nyz2MpT-!}h_jt1#>r81^d+?+e3+!SE3< z{2&ZJ0>h8N@ZVvC1S6Wjh*2}@ zk=tQp4UGH@MqYKm$nRiOHyG6qMh$>bt6aw};VbFuF60u7c4^Vf1ns{XUHT6h?mzV-jIZXBd+XV;u8fjDRsq zV9X;h<{23C9AveCEDf>@$SQ`cS&&r*SqCBO2xJ|DtUq9^1;#dovHfA}2pBsG#%_kO zwJ`P`7<(1Q{sLovg>e<p^5`4C5Y$anFNe+({VU48|)kUW4&@Fun-JPlNII!uX>w z{$Ut@119*u1T#$N2NQ^mWQ24t5)b{S+>L$(vLmq7M%$X*56Ya#my$bQ!W*`Gr8=aBss zWPbzMS0Vd4Wd8=)e?X289KOLT(J?#zSs9$W4XZ36N`p+-k^O4!J8KcMasOhuj^ITMM}dA@@FT zjIr$C+#d4Z4@40$n-7YBJgA#V^4@`ga(6v!)qyqS=<8uDC_w*~T^ zfV>wV??uS_1oFOxysMDk9P(Aj*C9U(@^c|SAM)>k{QZ!B5c0o-{Ogea0~Gi}K?oFt zK|weaL_k3z6eKyIpc@oqKtWF^=nVyZp)Otrw&W-zrSO!bGU3QP@!sSz+W8m7j<)V44+0jB1{ z)D59|ZMJ_PK;!CnaV>0rMT?6bgL4)!@Ghjal_IJU40qhsS{xR4+1ik?Kci<@UgCYZp0--1v zio&2M35rsoC=H6bKv6d+S_4I!p{Ndu>Y?Z$6x|0!4?@vlD0&o%o`9mKq3BsCIt4{n zptv~{Yfu~t#ohGIJu7en!M zD83VlXF{u_nNhFlCgOUU& 
z83`p5pkxx1%!ZN$P_ht8wnE80P*MjaFG9&_C^-uy*P!GEl>7!W0$@f2%!q;+Jz>Tm zm@x!q%!U~YV8%k2Q42Hn!HoSd<0Lp{ya6-Lz>MEurWs~hVCE2*IR<8qg_%y6xe8{k zfte>@<|&x@3e3C$Gk=DeH=r~UO4~tc0+eP#=_Dx4fzs7b>VncOQ2GRvz5t~!Lg{5F z{R&FI!NIH+FiV432F&UNvwFg;-Y_d4W);J%=`d?E%&LW1_rR>@VAiWJ>vfp*9nAU_ zX8jJcV_|j&n4Ju>N5JgyFnc1*u7uf(V73!x*TL+AF#A53{T$4G)d90#huL4j?C)Xr zk5Cp0WpPl}2Ffy_Y!Z~^K-pXE9Eh4S%GUIFFRQ10;j+XdwZp!{Aae*wy0hw{@<{tHx?pu!g_Vxgh~R3t-1 zCR9vwsO$%o1E4Y=DvQBU zIUOoDLFEpptcA*BQ27*8o`A}Cpz;z_egu`j!yF0bG=VuuFee@6bb~orFeex0PbJV9s%v^C-+Y4|6_-IiJFuzhG`tnA;rY#^GRY63k73x#M7NKFlqIxeHQ2h{8 zKL*uLK=sE^{S8!KfkpnXC=eF4hDCj0(J)vv0v64JMe|`%6)f5biyC0j0a)}hEIJ2^ z-hxG*t=$J4X3r-cI7Z{Zkp+&O;AjBH0dRZ;j_<+oBRKnjb0|26gL5r7w}8_P&QHO4 z1)SGFWPlg|Vi1TeAa;V-1>$QEKY;iN7AL^sPO!KOES>|49k5uy;^#aMufpQjVM#Ms zqQDXjmP~{tHdrzRmfQ3|MVaX4$X8((oL}RG%S4& zmR^8mp|C6tmbHOpg|KWoEV~ny)x)y;V433qSoS$Ay9Udy!}3&E-W`_rfaP;xxf7Nz zhUG88@-wjf9IVh_MJTKYhZWhdVhXIV!-^xY;t5#s6s$DEN}mrRWHHn7O+}_)ds8{39Bc- z>PfJ=23GHd)eW%vb69;1R$qrTsj#Lytmy%3%3;kyShEP$ya;Pf!HO&~5q#HeCT%FK`V8*HCaR2G<&JtpnFv;JOH|ORzZvHpjx|c-UM5 zn=4`ST-f{~Y(5Q}&%%}@*pd!gy1^C~Y^i}QJ7LRZ*zy%@`3AOjf~}sL)~&r^>k`#~Lt%S3Z0`-* zN5J+`ussvDkB05}u)Pqr-wE4i!uHv)y#lt+h3yMq`!d+R61J~_?dxItM%ca?wm%Qs zufmQn*f9cjR6AhD-LPXH?AQ-G?u8u>z>bGt$5Ghv66`nwJI=w5cVWi`*l`hdd<{Fk zg&o&n$B(e%7ufL!?Dz|6e4xe(HPKL$05x5qraRR1hMIm*GZ1QqLCpxL@!Yi4OoEz9 zs96LxPN-P|HOryK4K>@LrUq*6ftuY=^C{F^ftuf-<`1YfL#+jBn?Y?$s8ylXfZ8Ca z4T0Kds2vQoQ=xVy)Xs(4`B1wMY8_yyT?@7A-5o}o0`d+*?fp>u7}P!iwNFFsGf?{+ z)SiUem!S3w*lC8H?P2F=*f|$=*1*oau(JVn9)z9u!_J3c=OeK5P1yMX?ELWf?$Ms} z?jG3P0J{&t?!&PAP1yYb?EVmT{|xm&y${rfKz%IK$3y)Hs2>mY6QO<%)H|SFKz%LL z?}PgNQ2#vCzXtVhK>g3K2iW5Sd!k`a0_^DkdxnE!&p6mK0rm*kvl{lSg*|V;o_Ar- zdDv@#z5cLQhP?w}?(y&uBf%dpQ1`+{I!Fzo9K z`-Z{35wNcq_RWHQWjNTk5%z6|eKoM}aoG1f>^lkjK7oDT!oI7}&>9+|p&=F;hCss@ zXc!9(bD_Zr4U3`SUT8Q94G%-ZIcRtv8ZJV^Z?NAC`z^3P9`+~0{#4jM1on?{!2YqY ze-`YY5BsZNe?9EK5B5I*``?EBAHx32u>TJ@V1WZo;Xo1`NQVR6;J`RIkPin6;lLs| zumTROf&&e3;1C=*3zZUK@x(mVz8~E>*KoTH8 
z$PR>rkc8SgYF%~JIt)ptSxLU2&T~S-tUu)glx_7lM_TGEQP!QQeR!9PYY#`E<{@_qpc|KbGMaF#IxxU(WDH82%K)pJDh2hEHVpWQMO}_%?>` zU_=i_WHKU~5l1rO1V)_1h?^L3CnN4=#D5s^9wR@gUC?juTKAO=dG5Tai zU(e{jG5R(}zr^Ub82t{Tr!#s1qZcuHAEVnC-NBe*#{85qM=|DijJcdKS2E^a#yrB9 zM-z-0$C&AinaP-P#_VNGEn^2Tb}(a)VC+SVy^^t4G4^4`KFQdp89R=#(-}LHu~m$% zXKW+mdNMAHaXF0pCF6e0xbqqJcg8))xQ7|{1>+Lq7&n1&s~Pt_gjbpHE)(8o z!WrDKBi61d>2@}6# z;%X)~GqICN!K4x<9mS+$nDl!l{gFvmGbzEOrlNU32C6iY%r7u(ZF{OYhw=!iIQ|@ERe5QQOl;uqA$J7$0mNE5irar*bhnPB^ zsWX^5i>Yl)>&mobrk$T)+9gc8jA<`1?JcIg!?dkT+r_kMrXS1nQ<#1l(;r~^<4k{o z>5G`Yg6S)nk;aT{X5=#Ca%No1jO&^42{T4AV>C0Wm{HG+MrIzt%%3weaXd3`XXbs( z{0B3qGIJg?7cjG(S>2eG!mJCKbp^Bj$gFpm^$D{+W!83PRWoZZvrlIBS zE6ja|x$iM|4Rg0JcN_DHnfFuX9mTxcnRg%a{=vMd%$vu&1R=4BUq5af^-&Su;3~d{FwzevS1VoCbM8F3$ZYjg*{k!CJTSV!r!v+eHMPk z!Y^1@&%#42Jj|kVSoAv zSenh!LY59>Y2p@^{+*@6Sh|R%D_FXcWhpF6XITczu4dU^SauW3K4#enmW^Ur6U$mz z*3Q?z`8`Q|RZ`8(gt;F}+urEk7sc`D2MvOJUJ zSFrp#mfyhg|FV2I%SW<&FU$9{yqOh0XT_&YgxID zmFrnq!OE&PR}76eyZ(>wK7aFX*TkEN8(DQTtL|mh{j7R`RS&W1306JDs`;$?mQ~-e zY7MK_v1%KucCdOdtB+^(iL5@E)u*!h99Eyp>bF__8LPiw^$1pvV)bNJPi1ujtJ_)K zNn*{9Sd+w>G}iQH%>}Hvk~LScW+-c}WzEg3`5S9KX3ZGZjAPA2)=Xy29M;TZ%|X^C zv9=p)Q(4=CwVABVX6^4;JCwE8vi1hn{)M%-v-VC2YbUUFE^Ftrb`fhAvvwtGSFtXe zb%R(}%DNx3?kB7}j&&!n?m^Z)&AMk<_X6u)V%=M;dxv!^S+|9C+gMl5x(e3SvaX)> z$Fu%S)}PJ#a}%sTkM$R`{!-Sz!}`xz|0U~3vVJt{r?7q+>s#2+jSVSm=)s0GHe|CQ zmkk%O;ZJP1h7H%V;m>TijSY9O;Zrt@XTwA`Oku+`Hq2+kLN+GS*qG18{%kB{<3Kk4 zgpEhC@lG~A#KuS1_!t`>XXEp1e36Z-*tm_2JJ?vk#ws?}v$2s)Wo$Z*O((GF7i>C( zO=q)d2%DZ|)0=F1n@#VL*z^ILK4;UHY}(GIS~k_QsfkVd+0@RaPBx#w=Cjy*4x4|? 
z=JVNn37aot^D}IIgUxTT`CT@@&*snA`~{o0vAKrLb!={A^FB7WvAKiqPm=KcIedRE z-=EL-7x4XMe1AFLkLCMWd_RZp=kxtSzF*Gw-?Al{Etzb|W=kGh`mv>iEoE%EgDnrT zf7`k*!nMI*qOK*}9NzNo-4JTL#;**_O+;fovPZwtLw2DBJ$UwkO#36x&{A+pBC_ z!?wf@w(VqF729^Rt&wf}*nT40&u04&wx7rL-?05sw*P_c&$9hZw!h8xkJ}pwr^!eS9WBu<5YH>!;Xtd?6`y-e`3ci?6{R3!`N{@J04)i z!|ZsJ9nY}id3L`Z27cXp<+vkyBn*m-n5DzBjOIx26W z@;)m6LFHpq{+r6DsC<^n7pQ!L%D1U}kIE0J{FurSR8FLF7M07W{D#W4RBoVhGnHGZ z+(G3YDr>21pmLvt%4RA%sY;IABON!6KDokP{Hsrn67 zms0g7s&1s}?^F$=>S3xLrRs62o}}s-s$Qk)b*kQ?>RqZnplVuzs_j(Ou`8Qhh3q<# zT_>{ZY<3M{*M;o5m|d5#>k4*V#jZcI>n3*H!miucbtk)?W!E@%&0*Irb~Ui8josbY zox$!bb`N0pKz5g~doa5bKVkQA>^_m*C$sxBcAvrStJwV*yI*4W6n4*L_cC^`VfPMp z?__rkyX)EA#O?#^K16j;okVpq)!nI1qdJ@FK~x__^(j=JM)eS?e^}pFUqJPRR9{T> zRa6h9`Z}urO!ZAv-%0g@R6jxWt5m;6_4`zRMD-_Be@69}RF9{664g_wo{-p8 z9qdhIZ(sHnviAt~p2*%`u=h;%p2OZ>v-dab{T+KRXYU```zQ8Z%ibH<`!IX|$KD@~ zyZ3Hl?@snMu(zGPoz$dI(}S8`)TC3BNlgJYMbr$Uri_{+s5yt4TdDacHUFh%I5iWf znMuu3YQCms4K?ej*+k72YPM5TO-&6oiF#_9s5wAQ2erx6rc*nB+9GO?p!P^=kD>PG z)Sf`?FR49~+HO^p?MiA_Q@f7ZjnsZmZ6&q4sohI$9kq?r9;Pmdy57|Fr)~gsgQ@!o zbw^S6GwP0`?lkJopzbW{hER7Nbr)0jL;tV4nYw$ayPvv8se7EdC#idey635Tow~QE zdzZQosQWKOB0-5lz^p>73r>#5sB-4^P$Q@4}48tUq)YohJ|b%&@A>U&Y2M}1j> z`X5t&EcM4ze-iblP=7l0=Td(@^}nV5BI+-p{%Y!Prv4u4AEo|Z)IUf4i`2hD{cF^} zN&QFEe?t9d)PG6+Na`n1Kacvasoy~TChC9i4(h9@ucp3+`g-brP}xd-2Ms|(5)ElI z!?83RN5g3}oI%4`Gz_8PJQ^;h;W8Sopy4VShSG2o4R_J-01Z#k@C*&F((pQo zhPP;Vmxd2$_?(8}G>oEQEDaNAm`TG@8rIOTgNB_n)X-2*LlX@LXgEY;(3nJHGL7A7 zOrtTI#zGo@OydbOo$q6_#BNd z(D(+8Z`1f5jUUqZF^wZ=98Kdm8Yj{?g~quwE~jxljTJOj(O6Go6O9LG{6TOlO-Tuw zl4)6+CPN7IWmy+YGFG`&yLM>KsxQ{pq4CeSpArl~Ydr)dFAi`aKD`>tW%b?o~y z`)*|49qhY{ef!xT?C-+(d!*#CR>4`u(g?7xBie_{Xa?7x%!tsF?d@f4xG(_%Q$d52kzp)Jsh}?1OK2oiRLt#vuQqo=F@1t zn&!J{euU;nX?~pMCux3$=I3etg645FPoQ}+&C?__FQa)m%`0hMP4ij~{*i+>aPUqJ z-p#>b9K4@{4{-2d4!+I7k2&~14t~zT;T)X7!Py*~$H9dh{E9a6ZNH%HRN8(?+nKbTBcbg&+HRrkR@&~M?QYth zr0r?io}uk|+FqjVRoXtJ?PJ>hN832sCeSvSwrRA@q-_puOK4k0+t;*hpluUvTWH%( z+fLf5Xsf5Kk+%H_+78m*llES;_olrs?OC+v(q2mYNwl9r`{}fwLHl{M|DN_sX}_HI 
zYiYlo_F=R?Nc+>YKTG=yw7*RIe`tS$_77?QFYW)M{d3x9(w>-2`yATm)4quICA6=l zeFN>AXx~M9HSIOD*VEoa`vKZJ=s-soI=a!(kB$O52GB8(juJWs({VfozK$wA3EQV(D^o<@6q`oog?WSP3I&! zr_wot&e?P>qH_tIU(>mQ&Vxd!aItWe@F(F0;ZEUh;UB_-!Xv_C!qdWY!i&Ny!aKtI z!siKLqA*#QCCn8T2ww@yg_Xi;VV$r|s1oXhCeh^*(dAmv<$BTO4$Sg@|NiGj_C59=#rQxx>SjzJ|gK9k#x35`mIR1Oe75z zN!N*_TSU@bBI#a{^o&S)P9(i9l0FbgABm(-MAD}sX}n09D3TV5q$MKhYmu}1_5?#B9uH8h}4AC`9bj=lA^F`O|MAzFz z*E>bmzl*NJMAzw}>tfM$sp$HR=(<95>n^(Gh;DhJTYu4Qfao?@bUQ+H`-|vyx9E0{ z=ysn(bo+Ve386JBri*di9vq)(Zsa-{CFOk|$ zq!x(OVv$-ZQhzKGsYi*_pNZ7tMCxfGb%;p4P^A7|q~0h}Zx*Syiqtzq>fIuBm`J@} zq&^^09~P;PiqywN>dPYaRgwC-NPSbJek4*q7O7iA>K>6=BU0-{YNJR!EK=J<_v0j@ z`>#azvqbkHqWiB!_uq@|mx%7qiSBQS?(c~1?~Cpqitb;C?!!g*9in@k=-wc@?-Sh* zh#o%`J$@m2oFaOhE_$3Hdi+cDcuDklMf7-0^ms$`EEYYF5j~F;J^F+@J zM9&LF&x=LR%S6vBM9-^4&s#;$J4DaBMbBZP=l!DRKSj@nM9)V=&&Nd1`J(4{qUQmT zmM+qMCDLvXX^)7sw?*1#BJB&2Hch0>6lrrr+I*3gSR~Sxh_tUo+6s~Ook%MeX_X>v zmq@D?X*D9PUZgdNwEZIOpy<_A^vV^zjupMG7rlmwUZ03wlSQxjqSv>g*LR{34|q2Sxh7MfxWq{R@%4SfsBM>8leWeVs_( zBGRiwdbLQe5$VmMZx_+GtLU2|`t}fgGezHQ(f4A}_bSo%F46a1(f2vg_chUXj_A8k z^sN^eDI%l0$ml6DdW(#{BBP(kC=eM%B4d!qI8tOJE*BYBi;O2k#)~3jqR5ygGPa3~ z8j(>aG8#q3evxrdWE>Wm#Uk?&SoDz{!CUTAtIX@LS$BUd3Mb610=QNRXsmQrb%5?^ zr;7Y+k)JE_3q*dA$R8x~e=PEUD)NsO`NxR-Ux@r6BLCMS|Fbw@UQeA^KH_em~Uj5&ddK zzXs87pXk>t68*c1{wbn=579qO^zSYDr;GlXqJNI)|4Y&TI??}0(SNw;zgYC&EBbec zf@D#UDhj%bf(%iRB?_`d!5~pkDhiGl1t*DuQ$)e(qTpAe;A~NFt|&NP6kI701y_lJ zp`zegQE-DOxKR}RRTTV96uc}7CW?ZMqM%g_C=~;Shyk~W0gs3QPl*Aqi2-kl0UwD0 zBgBBQV!#YBV4)bWSPWPx22_Xvd&PiGQP^D+9wiD-5rwBEMBzoE@Dfq@2T^#XD7;z} zULy*x7lnTjg?|-=e-nidi^4}m;p3w4Nm2NWD12TNz9b4?6@{;h!nZ`>1W`C&6s{A6 zdqiQ2C`uParK0FeQFOT|x?L3ACyEmP6h#k+qQ^zi%cAH_QS`nj`b-pkDT;=Rq6wmC zk|>%iisp!-`J!lnDEdYeeJhGqilQx|Xqzai5=GUbs74gki=t*x^n-v_QPd#@=81tn z69a!Q1`ZX8f!B(GH;aL{ih*~Cfp?36!^FUc#lT0!z{kbFC&j>L#K4clz$IefdNHtC z3~Uwy4~c=TVqk|T4pE#WinB%WAW>W@ipxatU{U-_QG9_Y{+%fPy(qp^6kje8#eWpV zH;Lk#Me!}7_-~^4eo_2SQT((heohp>D2iVZ#jlCtH%0L~qWFDLJW>>o5yj(0@gz|^ 
zRTR$<#j{26JW;$-6dw?SlEt6_V$kVg(Dh=_%VN;yV$j5d7&KW7nkEL#6ocl7LG#6+ zHDb^XF=(e4R3!#gi$OJFP`wz`B1*c6k`z(WLzMIqCF!ChQ&fNbb~0}C`ub7qO@IP-a!%|q!1vK&_jn%Q%Ht1Kp>Qa7Q)P-7pVgbB|$<$ zS1c&lK(JCo5CIFKVnsy-L{Sv|o=HU1@BIULxM%Nm)?VlAeagMCe39kc0?TT{^MY}4 zjem1f>Y8pf0nJUZrdFmlrX*8}se`GLDb3W~)XUV@G{7{_X);xq{14boO*~9Zohv-k zOgj&p@wA(LvwX6Ba(qVnjQ4rSXM)c}pGiI?K9hZ>I13w|vhTJ&ZGFai(0a)FiuG0N zYu4AT=dABo&s#58uUJ2|erEmLdfj@%dei!&^-meuBzwr-vX5+)WjRO=mP6!FIaZF7 zTgt8E4sxp8QSK!7k^9R1ewT+sf zwpCNqRJEg;rgl~PsD0IbYJYWrI$Ry0j#NjfHoH1T%~uQ53F<_3k~&?Tq0UrisY0En z&R3VK534KG&FWUQTHT@URv%R#Q=e3yQV*(!)ECqj)uZY$^)>Z%^`v@AJ)@pgKUcp{ zzf`|cuc}|G->BcJ*VOOS8=9n9v<6y3t&!GPdqDHjnrPlyxE7&BYEfFWW{=UDY0b3< zwboj^)>cc=x@z6D?phBmUCYw4wH$43PVH&!8SS8UNPAY9s6D5>q#e~>*WS=hYj0`qXqU7PwaeN^ z+Q-^g+EwjqyY`*-qxOrAZqhyU#`*)gm#*o-dZhlK9;dg~6ZAwqMenGm={@vxeV{&6 z&(?GF(Rz_yte5L^^a_2hZr2^U(C6v%_4WD&-KB5TH|d-8EqbN?h`wEaT7O1As2|dw z)t}R!*I&?I)L+tH(O=U~>ZkN8y8ToAGyQY@3;j#|EB&hewf>F%t^U1!Q~yc-MgL9z z!)RzUG8!8X7`}#|;cqlG0*o*t+=w<}j5s6S=w@^`dKf*8UPf;t-RNWVH3l0)jFHAD zBiAT2ii`=yRAa7THyp+iW2v#qa2i#{R^t(4o3Yc_YwRl~<9q0U`IMg}0IHKb@8u0+fOu>#0drcvxP*a#G+!SGoG)0-BO);isj=_#0 zjts|8$1ulm#|Xzr$0$dpyH63trWVfN*f0-MOJ`bagoi2KIodzkl<0I!ZS7%d>-;1< z+QZb|xu$)jhpD4;^@3g=rY_F67lo`H?C;FUS3FGJoKy09dzgAUU&s&gzb7Nz(V^PZ z$B}(6M1N;mY@peS)pWoVFv{ZR;o;#)G|?u_q&X@bJN~b6CH+O{VRdxa?IC+89;%1t zp?eq}z8-##D#upGBaUs3YR7g*jidIjU>*U^(f&~$9>H}(@Cb8$5*}lUtQ&zxjPsK@ zn)CXTAs!wNI?o@2l6ozh3*3_1d4V*M6v8 z`}6hM5C2p9(K_v~{7d`mZtd^o&+%+Zbc1GY?G0L(G)Imw{pgl1Ot&1PoesZH=hdng z4y2DQOloS<8gF&u8dtk>COqD+!+Bi#SJt8q)NHjSm>UYS)!Fb|O0((0JX@IO3iCW+ zUL?%Rgn6YfuNCGE!n|3Sw+i!iVcsdsdk%~}chTFwcTSJVi8QX3i^D{SRb(ea7>D zX>cZems@1573OKeTv|t77Lj(acg}Dlye7LmJ|WpXN$uLTtr%GzpPcw_?!DH>|F1c- zK4E>5IO_pno*~RL>v(55Z+zFx^EvAg=iP7pJH2E*N}Tm&VJ;WuId%Suh_r(#bDvv$ z@N!mpn>Gotq4Du4?yxEC+WkB1aqDUKkUG6(Jwu%JZDFxa(x>wcQ`vh^e4te*(;d|_Tt$Fb1)%l9ojzqEeqj-g(& zen*`32Vq_;%uDM0OPxc0XyN&j^-ev(Z`R+5v;HB>%Z2&jI=~9&qdzqFG|P?Z>OyWT zKR}$^M3`3z^XfXl8mIaCgPyAF=ZwGZulmbPiIW3`*(uEH>ip}SIoD%6!{wMde>1r` 
zadHb`b_w&wI{zl;f$J?ii|{GCO4uyyU0E22ztuBh?CQW z`4M5>R_CvF+HS;n4wQ%16AY7w6DN-p<{DwHtpn_E9=h?M=V&?KP0*x3F08NYUBbNE z+4yF7^9k~l|10tGRC!uGdap3=a}K*1-fy;Sccj+JQ?mY&Vjmv+iUrEVBgKWix1Ii|j zDV!?I&kOSl!u*^g$60dgVdp2mG-`B?@jN6yEkEN-{W(Ockq^p;ob7*3lb@GgbO(Jw zm=9OWhlTlwJ80A~chHxG`K9|oUzPXEuM;iI$LhF`I&z$oe_Y@kbz4n)Q-15d@H4{v zO8pV|rF_m!^NujTdY|TkykCCTP4jvk&1;UFTx)yhl=ottKizKD_#{W`<(J1ylPCB%_htpp46rD`Qqm_Kw@+>QuQA|1KaN|Z2PzQ8w1jMB`VeF}VH zlom>?{HHVhw{YzvVg5vz&%8q;rIpfJi8l>Y+BogMHE;8=wQ*g>l|&^eYd}W&q=Lek zgKVW^w=2n%{L@g6N>SQ5Klm-cv%S*6`O9w|Z8OeW(N(0`HMP5Z~s%#KwZUZ*2%cwmN8ywrNk=}l!>`kpMT6mcU!4Vm_HWg zYgUhQG*U{G$;y-w12S3`)H%NE+JO4GnWmKH49MtTR%*A6nNaBNR-;T)$|xB=Aj93d zuXv_1`@Z6GVgBKs;JI$Wc45B$U%~U0SVgT9e4|eAyZ@ZGCGL5X?$g_sD=Y5vuN3AV z@A0p3^RE@=TmR+Xpu{SEb^Jfo@xT91{w;2P%YFW>$~HIsBf@;UTB#Q1UvkpB_ZVQC zSeQ{db=;)lQO^87)PSAJ9!KhSWtXyBnC}SlZ`+l<%06NKU6}7W*ZeUi>M7-*Beh03 zpggTSBg}sa2_czkltaq1jh70^Lr4L_f;At{_dHZrtuzK zZYsYzQg#O0IYISs|#>e6L_e83i8t6#frRu7o`pVZ; zf3>L^Af#X+g$k*6xO;DuB83z!q-ML+AT?MGQA5=*HC&Al(t|>3C8Pu)wG~o(A$1Z` znwz)o1XQEzb}}i%L9R8?^|oYc>56M?vYJ}B9+FI>8n>=@M>uf$obS;{O{#B4x!$yx z`Za7{-->aCHZb*d9kG}!jXT$ynmce_IoBu5eS!2+yQ{q%DcjW^YEL1x5K`=RwYQos zq&Ok9bUoa_)I4mEn&Acvb^}`10fwr>gcL8NHp6C&D<~c7I@`b$7?!1ucEhsWu*5o8 zu9_#LBq1gL0~S!Ej{6U6y!w!kQiRm5o@cT;%?+61=IKz!Q>vB;DOE@v|G{G~SLgmm zm0fiRsk4x}M08oOx~{w;x-6bt_i!&&Ug|=1$$z>nRhJ2=tB|@ybeVVbuWo-m+{^eX zM~A)YYITjeR&}cD)b;8HwTnyLsBRKc4MQlOp$?p9 zK1_Q+Jzn1){uid@C9Ai3y52U@P1m{ZCUNXz->kZq5A~dS$&tESeMdd7UQjQp@2c;q z@2ejODN{&ULdq6Wj*vzRDOX5&LbB~9PQ9#tL>xZq2Slo$x$mOV7$M~g&zlZUA=w;O zAr-jhHZcXbUiLB_an1KKb++ePjq-B$D^OBJY-o9Wo0QhEp>2~BDk}ckp4A^5xz+Bd z_m2woNA-8Nuv_X+>d)$J^%wP5^^W?RkP3xVB&1>?jTO>3A&nQ(LqeLcTfIx9`lp6w z(maSHT1XS!QYH!Mgh-ao3QN4h>d47T@7`mmZA?Ggn8FFUR%?3q9vNdxW(=@RA6INE zEt%@x&U+M$D=nFtHnq^^iuX1xbZbwWHf`LrQro1`df&8uwpk@*r6VWW#!YIOKY8-K z(lj)y=I=<|t;w3AshXzgnxXk>enKh{(qth`5zeQrv#YgaA(ap9ZyPu1UN3CZr3w8b@9GZp0Za^zNPy@lks*V)keY8j5yU0Oe_zcxS{s14Ev 
zYeR%2gfve`^M$lPNDGCuNJxu!X+yPP+Hh@zHbNWK>Y0WtrX=U1UZA4~zLE)s*aYf?_rwVDMkXHPiv$}Ibo2`|*cFU#^&+XbA*G}0K zmG16lufwh$o>5XZHNWs4+59@|+EL>sl@?AdvgH@PQ~$O>fwtItx&tAt6P`!vd6#L+ zow)&qv|W3c68BU6%Z0X5Tjg4(nBqKFQ{sMTG3wqNdTCB=qa&r(=YM@(&^BqCiPI{D z@*j;-s3Ta2RNpp;L-S#y_%i7D@t8Viv zLfYm2FqPwK>1PU3v=iDXxA~-y_Ec+c3Tdx1FDf8WJL~AMU3*(QC!~EsdgA`PUC`ch zq-@tNYVQi^Q6W9HU3*{qKuG(A^tkhANSN!ipD8p@yP|#mAJiAxmqL0{NKd&M`J006 z+PB&@N9wlww`=VO?Z$l_H-+@fUuTK-lUvEp+HD~n6w-6|mE6(p{wKyC+Mhx?B&26Y z{k^e7bSbYtqWTpC((^)k!FAZ*6d<41o9Ny&(tUI*1-hcEt}{NS zexZlmRd-xSuU*u2-Ozn?KN{)&dQ;N?T{R8V16>`grh)b&LV8U|Z#cN1{k^F45Ixkr zsNDPHt3rC|-je!jKh~oixixx>-b`;Uq@zMQCZw0|t%DKuE9qYIuf_6LQ*Tq}edQmk zWa$4|CjSylfoXvIM-S9f9k|EzhLB#rC#AC^w_3Lf>2TfB(!1*2a;-70HL5AXYd}ds z;dZ^d`@&amY2voj<9h17Tq{*mj9KqZiTf$q^|5MdWY_!Z{r^s@K0rt(|4OSq*wJCT zK19zD(kUUmbw54B^-=#$tDdQ63F%ECogNg?r9AK7*AqQY&;S46g>~>V5nW0KhW-m~ z=;QRMj?~@yc>N)Lf<957q?hQE^(jJnTS(`G^p23u3+aN8E(+;gA-%U-pQe}UW%_h| zhCWlDrO!U}cvDlbNn5C|aHQ_i7wL=jCHhi*nZ8_q*mWhqWF+V-i6f3kPU%h}g>=Pz z$avm#SRKhi`qW{qKLvZtoNSv^P*{*&S~ziWkp0ek}8^7ut-%((U@ol>DQ8 zPOtC$s*rxD$G@(>;W`^^GNj}B2}<0Ln(NPCQ;Oed{ar`u9{nx-jDA*sTR*42qo3C= z=oeiBLrft)@9Q7vmx$9Z>mL!Pf8weLFf2E4yEn zGYU&<^dIyalnfh?kx^J$H7`?jNx9+xO;Jzoy^$+o506@46fJ>eMhW%#PF= z!(@0Ep2DJf3X54-q#EsU_XG>6#*yRtE!33Tv5BELQfm!w!^f~1vamD|mWIO8NLU)z z8Y*#yZWzMyfV)gA-ooOOYxQwolL{vk=9d;0xGKX;9pkk0?mfDdOqgJsJgu<6T{8c? zf_NE0Mu;P&+6WdFuWBRIWePWict#pgt_xA7h$1hexzXajJ62e%_s7u6X#KBE*hnxs zI8t{RiAIu#6Hv*?ntdRh6zhhwJ}0ig8#8`-TOXoWEt+yEbg6d zb+s{CSVHc*Y(~D@H9|-$s*M6+2_1CL=VgpF#@~C zuhf|3PEUbmH)a?!g(Xx-tA!;}SfXl;g1X-oE@z~vxjxTW=t$je6c`KKAWMw`q>76)%P zYVYT3hp@D|&$ipx1rDB2XGpEl0^$Mv>x&h>J% zDcra2TUOt~S%n2@Q}f4;n_f8eZR1_zJy$}EDW?Am;B^eV-U6=+;B^_iu7cM$;PnG| z-2ks2!Rr_Bx&vN!p@{}fIzW@&(4-GEu|bo3Xj16f5@U*yS3;B3&}1z%SqDvAu6JWh zA@*0G$tTd{4tO^J?_lsA4Bq+RJs-Tcg7>rFeFnVGg7;? 
z%i!}p_*@5{o8WT`e0~O>U%}^h@c9$09$@tWYXDdyz#0YC7_c@6Ya6hpg0&M^SAx|A z*6m>32iC)2eHE;yv17dm){nvZ1z5iU>vgc+1?!(6n?Uvi*$ZTEkZmAO0(mmXQ$a2T zxdLQ6$Zvpr9^{K4zX$RMAb$q(7oa47(gl>RpmYbNCny6!8Eglo5|lll>;vU7P#y>6 zSx}w_wIQfFsJ@{3gBk#8B&gA#&H!}*sEa^d0_rkQouIA<^*vC(0QDUGteIdJr49%pmzhk zALs)>&j5WG=p#Tc0KEwGhd`eQdI{*Ipic*VCg_X6u5SW;Gw3y-KMMMDpdSJK6zHcx zKL`4G&@Y013G~aLe+>Fn&~JbN7!APi0mBcBmS7}-F&Kp*YU~C5CTQF{cuL*oT!M8E^dV#Mu_*%i&7kvG}HvoKtz&8qfW5IVY_>KbK zGVq-Xz7Fu6555b*cP;p?1K(=!tpVR1;JXWa_kiy{JNP~ZzR!d2i{N_%e2;?f%i#Me z_`VLl*TAnK__YGRVc=H=ew)Fs2K;t_-}B)2BKREvzoX#yGWfj;ey@Yyaqv3{es6-` zTj2LD_`MH)H^A=>_}PC4e*^qOz&{N9JA!{t@b3-&CE!03{AYu|3;ef%|90>{4F0c! z|8Zz)fu=stREDNQplKE~&4H$s&~yhh-33iwhNh>W>1k+s7Mh-erk9}UWw1BB4NZTA zroTae83GzXfC2$+AfO!tjDmm?2$%x_b0NSE0S*XQ0RgKZU?T)4*a`vLAYcy! z?1O;EAmDKbcoG7xK%f@{c7VWf5I7$KS7Rq|Ed;KIKoS~9e?hw=qg3=)<6N0iKXfy=nLC_codIo}yK+q`&It@YRAm}^vJgU6Lda?eSq~vD2-yT7 zJ0WB@gggczk3-0l5ONUgA|_7K_^LiDVFeIY1Yu(#Y&?WbfUva?wjaVSK-h0~2oHepE)bp$;e8=I8^T9Jcpik0f$#zd zFM{x~5I!ElCqQ@^gwKF*Cxll*_*Mvi4Z`1o@Usy99fbc3;lDtH3K4-25eyN%A!0B@ zWI%)yBJ7nAu@xd-f{5b~aS|fFfQauQ;yOg!fQUaJ5{NWGqz6R$K%@+jfe;xCk)aS7 z4v~=%84Z!~5SajxNf6l-Vf1FK=dJqeh#8vfat>z{Tf7n1JO4j#v5XMA;uqK0wE?CVp`ZCCJtiS zLrf~fbb^>J5YrW6xIw;JNsf<5jq#Jvu2$06<}#Qg?wccEn$XqgTz`$EeZ(9!`d z=RwOu(DEp>d>LAP0WH6Wme-+`AG8XCRuRyu7ql7#t%g7=0j-untB0Z00ciCiv^oN< z-h)g)~`V8*P!)P zXng}({|NDc5FZ8cF%aJ!;`>AVK!~3J@nsM{1L8MAd^N<^K>YJ|h<^p*UxWB75dRIt zUxPLpv60#hCrK1&}Ig-nFVb&L7VN+rWV?qf;Q)&%|&Q)1KQkyHorqc zQ%Hz_geXYp1_}KjVE`mdgoNpkV4n#I8zA8kNT`N{mm%R4B%FqXuOQ(%B;16=rjQr` ziBXW)3laxG;t)uj3W?>ASOJNOiXMnb!}&~7obTMF%-hjy<(yVs!I zx6sbr?`LT55ADODeI&H+5ABCT`;pLo9<*N$?N>ni-O&C?Xnz3Oe*o=2gZ5uQ2fH_P z@P!Wk(4jkY=nowRLWdIQFcUh=h7PsR;W6m&ICOXyI(z~hK84f4(E~b~p<_$v*cR*^+d;=H=vV+9i=g8Y=(q+tI-%o! z=y(V^J_jA&fsP+S$B&?s1f9I0lNCC}L#Ou8DHS>uK&Oe&sRTN$g-% zw}RgB(0c~-c0liWVDEhvdcP07FG0F5q=!O!IHYGmdI6*tLHZ_0-wx@ukp3~GUxoB< zpidn1NrpZt&}TCAnFW2yq0bTM^9J-e0eu@mUj_PV(03^G&4IqT(04ub-HM&Q+o12) z(Dx?vy#@VJp`VWQvmC%0&^xp;jFGK$? 
zq5oAF&<+N4g#q1Rz;YO{4hC$10iVEtuVKKqFfh>$13SXN&M{u7rV?Vc?fA z@G1;y4ujgjphOrn0|q%@&^#D)4hCI206bx<&gImMkG8jA;20LKzMHu`s488(G zVqi#X7}5rYOobukFvMN~L!N>mFTjw)kRd^aH)L2L!v-1SA!7n$)I!E%knuQV+=ijR zP!AZ|35NE9q3JMmB@A`J&`mJ(YZ!VHhTei@`J3VkQD-1 zVUQIGS#gln9H}H*AZs9GjfAXH$f|%WJ7mps|Gf~hHbB-U$l3x~ zTOq3&vc7|?pCQ{5vL(p&f@~kimLXe%>;T9PhU_rNj)d%3$Q}&YS&%&gvK^2;53(0R z_7ccm57{oru7&KKki8qS_kun9QOMp8*-t?BVaPrT*{?wMYmog0WS@ZSQ;>ZgvM)mR zw~+lkWZ!`7TabMlvhP6lUC04)JR!#dIgKDkfgBBTVj(99a@s=9V93dYoNUOMiJcq) zIrAZBFXSA6oM#~C9OPVroXe2&3ye0wXipfO2BZ7H=zcJI1&rPRqc_6ncVYA=F#1y% zeHU^qklPS)8$qrCxqguA54lYtHwJQ>LvAAECfgx51#;U%ZYtzIecR=n_koz>`9)jHGAooSceF<`pL+(k)Jp;MtAol{~ehRstL+;m*`z_>t z2e~&P_ZH;dhCDlv*97u>AWwrl1M>VJF9`BNATJ#9T0mYe$QuNCLm+P$B&8VZI(fq;T1px|RD^nt<_P?!RR?V-@#5emCNVK*r335A(Zm=A@8P&gh6Cqm(5 zD6DXUp>P2dE{4KoP`C;T*FfPqDBKQ(&p_c3C_Dv)r=jp16kdSB_n`1H6uJd`3WeW7 zQ3EKlg1tzAA{~nSpr|<%#X?a_D2j)oL?~JaMJu6b3lvpBQ7shhgrYrA^e7Y^fTDv? z^c)nu2t}_z(N!omL$Nm$`$Dll6bC|a2o%RcaZ4!f0L7iir#KCYyF+m=DDDHr{h>G$ zinF0O7m96AoDaoCP&^ijmq2kX6rY0P8!*-v#-_s99x%2SjO`6$2gBG58228GyL2d| zyUEkj9tjhoVM1@1Fc>Cez=VfjLMcp`4ii?xgiSDE3rsi$6HdZ}H(|mzFyTj-@Dogw zVPaF57zh(nU}73f>;@Abf{CRtaXL(N!NhGaaXU;r3KLJj#8Y6O=qC6BCIOSW!K8jL zX#h-`4U^`>q=hi)d6@JHOnMC_U4=zQ$k=$Gnn!qOz8zv2EmjeFr@^h%!DbkVagVmQVUad!j!`><#m{H z9HxA3hbiB|lpkQKFH8-Eso^lS4@}K~sl#CE6qq_2rp|$>H8Ay2n7SXPo`9+6VCs38 zdK0Gp22=0Cv;deE3DcrsS`U~u0HzIsX_H{u445_xrfr64H89P-1E#$M(~iTmlQ8Wj zO#2O{-G$N+C~XF%4?<}lD9wP1R;-1(Y>_GAooR zP?iW~9igl{ro`tgKq3mra`vA&5gtDKZ>`$0(g6Yj+ zdK;LY2-Ew+^x-glBut+L)91nT1u%U#On(xlAAsrSVftm5{xMAd6=rzA3^U9Kg&ECZ zMhooB7zi^)!i-FqF&$>uVTOPi+hN8&nDH3QxBxRgf*GH{Oi!5U1v9;2W=oja7G}1C znZ+=3GR&L`GdIA@M_^_(%sd7&Pr}SMVdmE`^Crx^1+$`HR!cj~Y7Mi7!>nAGWrJC3 zVb&IyRRyzNfmv_DthZp+4VZNYX8jJcqhNMRnB5v?XTt1!m|Y079WZ+-%w7(&pMu#h z!0f{?`%9Sp1I)ew<$+Kh1?4eNZtn%}!?sZh}gD)OLW z9M~%!f{H~@u?i~IK*c_&cp54WLd9jM_!26v!rVqMSAn@2%uR*4Jz#Dxm^&WkPJ_8+ zFn1%&t%kWZF!xoMdm84Rfw@1z+}~mDA7Jka_P${65B81Nu~&n=2JD}M{X4M#0FDme z=njsa;8+BXRp3|yj*r1{6&&AyNCwdvL>h=yAU1;74B`tA--Eag^P*v1E0`A#^CrW* 
zSun30<{gH4ufx3Kc9`!8^SxlcH_RUb^Rr-n4$R*H^Y_F2Ct&{fF#k5p{}mRrg9Tk- zL3danV8Jq2@GvYm2@BqV1s7nUFDwj&h2gL;6Bg#f!a`VB1q*k=!rid&3M{mL0}HRg zqF7jz1dH0jqA9RwHY}P0i(ZCBr(n@(SnLgpePOXbEFKDrb6{~UEPf0YAB4rv!s0(* zNds8Y2$l?lB_m-;CM?+vOP+)!2Vlt$u;dq5atG{76JTj7EbRnKr@+$LuyhVAJp@aS z!qS&vnFPzcVVM<{^@e4GVOa(&+W^ZRfo0XO>-U@UR9C2Vv*o5O{bFJiHJdUJMVv1rNUq55Es9!eGUNup$;#l){P%SYd}1 z&%lZ!u;M7JlwoC4SQ!W_^I_!#SUCw+J_{?4!OB-)l^3kiVU;he%7Rq|u&M}FJ!6Me zM_|=aSZ#vUjbXJHtR4ufN5bk%SiKikAAr@*!0NlO#sX^^!kYfDW;m=F32XMlnnSSW zIau>2tZfKu8^hW@ur>qM4uiGJV679@u7|akVeOZ&)_xV7?ZDX;oZZ1W2b>GRxfq;h z!1*3HKY(=xtP6p4VX&?U)|J4zDX{JstUC$o-h}ndVSO7|p9t$$!1@iaej}{E1nWPC z^dJ)zr)59*q8<4&3#~VU)VenHfO@-LfAYOHa`TLC&A_^u(=dA+hOxO z*t`%nFM-X=Ve?AZd;~Us4O{$ROE1_m1GYF}%Qib~*$!KFz?R*xWgl$W4_jV_EvI11 zY1ncOwp@TM@4=QYV9Qsq zfyy3G*$XQBKxGD0+Do8vCREOb$_l7-K;;^!TnCjKpmGybRzl^+PR4sz4C19^w4^mb-V9kyx} zTX(_MCt&MSu=Oq2`YvpJAGVE#Z8q3ez#*?8rawIFdtv*du>CFA{w{2PAGY6s?RQ}N z?@(hvO$gM4K}|2H83Z*$pk^}E%z~P7sBuBfHmKPSHAkT44X8N*H8-H<4%GY(wLwrD z4YkdnwkOmMgxbMiuPuk#1yH*PYEMAzIjB7kJAfUHV8;WnqYLavhaG)k$0XP>19r@U z9o4X7FYI^}cASMB@57Euu(L7jRAHwMJ5yn257^lYcG_U)c-T1sb}omV?*DwUa|7&r z26i5SokwBkN3ioN*!eZ=(qLB*>hkwF7qThh0y=uG6sV zBJ6q(cKrmq{(#-U?l9Q>AncBX-Q8e!KiEA0c8|Bi?rE^Q40cz+?wzoEH|#zOyWfZ1 zmtgm8*aPhGfIZD&PaD{i2zv&?o{_L86ZXu6Jp%U3hdtG>XD{q|6!x5gJ?CN1Mc8u- z_S}U%f5P5S*xMZT+FQWh9>UJqXTn|qd*{R69k6#l?0o|Eo`t>d!`@4+HUW%w3`?__u_Bl?jTF^CaE7;!QqhB4x7MqI~;TNrU0 zBmT^Y=NR!kBSte~3L~a5VihAcF=7iNx*3_m$io?V3?qNa$kP~k10!!|jOxj#WJVpysN)%RBBO3()NdGdC!=0q)EkU?i%}C8 zHIq@Z8P&k3Hb(7d^k7DpGWrNcU&!by7=0C^?`QNM8T~k;hckLYg3*&0y@}CPjNZwZ zT*eeKrkF7oG3F}9T*H{h81ocko?*;z#!O($B*tuGOf6&T7~6}n>5R=}?5T`Bhq31| z_IHeZgt3n?_7lbqXY5GECYCaGEo0X+?l8vnVO(Fvoy@plj60igcQEcg#y!Bew;A^l zLL zc>t5|VDf!Tet^j{nEWl17cwQADMOi3z?9!II+PLgQ;&Zbv;wJGj#{ke$2F!n05-&9%kB;9MY$!&1TwnOk2$KWTs~@y&u!B zVEXk;zme%5GW`ptf5r4|Os{2n9W#oV@e^ho!;HI_k$8X^4>4mLGo~|RCNuUi6EhEE z=FgdVB{Q#P=3C7CkeMGba|1JXFtdtTCo<~{W({N3v&?##S+6o{4YRf|Ya6qVVfIg% zeHybLVfJ5`{S=AWQu-ga=zQhq6`)dWYHiN-Oi%l 
zu_$pLi)ORvI~FZw@nJ0P!{WXyzLv!|v-lPkf576;S^Oo7>sh>)#rydFLcYI(@2}$f z(R@FJ@29b(KT8I)WGG8+Wy!rP`5j9pvSb!Z=CJgT-n2B8rD-g^lBGAW^j9o>pQWF% z^b3|&v9yt;%`E!~%T8w5Pg(XD%bsG{Gc23MvV|;L#PSrDXR^FM%dchm%`Cr#9`4&GoGL6>Dy0&0VayhczQvGfl#p8LXMjnz^i5%$g;v&0y^i*5W>&|A~xvaY+ z!Me*>_ZI8^!@5sd_XX>|V%=EQjc46%)?@u)tnbPCWY%Y}z8~u^V*M{z|4Y{2$oiXD ze<$njX8lL3AHn)jtRKtz@vNW0`dO4GQ=Ub6B8T#Uln5= zpKN@GjUTh|6E>E!v6_uFY^-Br0~_1exSvhOvuPNc&X%z0JT_gxrpwuM1)IKR(_}VH zWz!5c&0^C+HZ5XvusMy*8Eo#)=4>_(Wpe?W?`HEOY<`T*kF)vDY<`Z-&$D?ho0qY9 z1)Eo~c@3Mluz6d8Eyu9sbhezymb2M%E?X{P%Vli&h%F=7GKwu@*)pCjGuSeVEnRHw z!`8lR&0uRkwhm(J5VroFt&g+y&umSw^=Y=g$kvzGx{R$G*}C}Uf#z(woPN(Ot!6O zTQ}SLvi)SX4`cf!Y`>K4zaX*wcDCQa_WRiWAln~i`(tc>ob7*Q``_690^46=`>SmK zn(aTZeJd5oRAf+*M@1nO$53$`6{k^g1{G&faV`}XP;ogGS5k2e71vR50~NPRsCa;i zC#d*46)#fpCKYc}@h>VqpyDGczM^6{6{DyaOT`2#mQ&HjjwE*cgdHcd<6L%J%8u*V zaU(l^!;ZVyaW6Z5&yEM!@h5itnH>psJafoA$BvKLv5*~W*wIO4FDlci96)6Wm1R^O zOXUevo=oMbRGv=d`BYv=<;7H9M&%V$UQgw1RNhbJgH%3F<&#uCMde?q{2P_8Quzjz ziGNc0E|u?7`5BcXshmXRJSxAXav7CBP`R4QbyRMkatD>wRMt{iPh}I8`>EI)kdSs5+Ob3rJM`oT@9Sx`C?Osd|8_hp2jjs=rY6G*!=1 z^*mLtQS~NOZ&URzsy?9VbE-yBHJPgUR4t%tIaMpET0_-(sy0$pN!3oOc2U(pRWnuX z5~>fQx)0U4ROeA$Om!L6M^XI~s*k1mRH{#>dKlH`P<=ktmr?x-s((%O@2I|y>c^;l zoa!g3ev0b9QvDLuuTuR6)&HdWU8=`Zy@BeQ1Uoa>Igp)Y>^zR0XR`AwcK)25m$374 zc3#QOYuI@cJ8xmxe%c$8%&1Pz=s6C6?bE&<6+MkoCy@c9psJ)Kb8>qdB+FPi-huTM|O;Gz1 zwXab74z=%5`ysU-Q~N2kBd8rs?Ko;DQagp(Rn#6}R|>n1W7lcyx{zI0vFq3Dx|Ln` zvg`K}c0ItZKd|djb|u*L47;9V*WcOo4|aXQuBGhSNL>%=QmGqAT`_gXQFj7$r&BkK zx^t*IpSlaFyMnr_srvK>!+-_(tuZZ&nA6V&aZZXb2+)E`EDPwG>s??ZhW z^#iESqkahW1=JT)KaBc2sQ&}??^FL7^&_dDMEyMKzomW|^*>O*n)-FrZ=ik$_0`nZ zQeRK~A^oI&KMg@c9~yFL$fKc{hB6wCqTwer981HgG@MSuFdELG;d~k{qv3iQZl~b^ z8XltI2^#)F!_zc8OT+Utyhg*DG`vm2zi9Y?hR+XaIOAxTPQ!OJET&-<4QpvAr(qKf zTWP4Fp^k<|8d_-BLqjKx$u#z(F`vdl8h=FN(KH@M0O%sL(^w8eM!^TG>xQb5>4OGw1lQ| znl{o@NmF7cO}l7npsAUrLo7RJ>ZbWHntRfmM)M$=OKCoV=96eXljgH&K9A;~(R>lj zSJ8Ye&A+7iMw)M?`7W9tr1=S&|4#FZG`~sn+cf`+MDqtUe?;?FG!LhF6wPC4o*PRn7m^rR()mOiwk(K3LR0$Pru@rR5x2&Zp%< 
zS}vyLG6^lepyhg6enrc#X}OJ--_r63EeTp)qU9A@-l63^T0W%ZV_H6?WdtpwX&Fb$ zL|UfMGMAQRw5+G4ik6+UG}E%1mNr@r5$>S1C#@+7TKmwNMr$Umd9)VOdK|4M(0V$p z!)QH+*7Iq-kk%_`y_(iv(0V4JkK0VS z^kYvJd#+^9&Fr~_J-4&x4))x~o(I^|!QS5N?Ze(Q_GXgUo5kJ%>^+gaXRvn|d(UC- z`Ru)xz4x*ALH0h(-pAPcC)#?_mPXqE+D@VEOxk`$+e5S^XnUHrXK8z$wiju8nYRDZ zHjcIlv`waM8g1W8Xj?|x545eOZ7ut5VBhWRdw_ipvF{P~{gHi7usT``c;nMSE}BQ)y49y&vt_wCB=ZO8b$t|Csh;Xg{9zlW6}b z?dQ|}Gukhr{Sw-rqWxvs6R*<#2JLUrzKHgK2?xqJa3crq z=D@ui_&o>i=fGzi7|Vh29GJv`DI93vzyS_)aG;xmhjFkc2U9p$!oj0S96XkTCvfm& z4qnW`n>ctE2k+tFy&U{K2Or?zA2|3F2jAl0mmK_>gCjUNk%MzNxPXHzIk=I7dpLNI zjx0KUM8{9*IE{`o=s1gxbLqH%j$0*k+)Kyr=(wMbhv;~oju+^7k&c(?c#V!X>G+tA zPwDuaj&XEMpkp!})99E<#~eBq(eXVUOXw)4VP?*U@%|=jU{OMdwU9XVaOOL+3m?7tpzg&b4%w)47q(8aj8;*+6GAoxAC5 zqqCc?peu>4WV!~^HI%M=x{By3rRzw#PNwTry8ef*3+cLuu1o3qU%IZM>sk_Bx6}1| zx*n(N&vZRa*RymzPuELyy-L>`bbUeBEV_2kwTrHObhXos?jCgaqPsWU8FcrjJBRLp zbYDvMU35Q1_X~9YgYLKJeuwV&By@jB_s4XPqkN@Qm<+@RIPV@P_cd@NeNOVd9}nlZ9ErTw%VjP*^7X zAgmVF3EPC7LbI@2^tf8|xJC51P4u{5^ms`0ctrI0qv-L3=Y3IE0P`*Nso%8mqpU6 zBI#X`^qxriP$YdMl17N6Q6g!kNSY&(=82>QB59FGS|XB`i=>qzX}d_O6iJ6_?h;82 zB9YWAl6H%vHqo<(=-E^BOc6Z~7d_7rJueqMuMj=27Co;OJ>M5SzY;ya7ClFbo})$2 zI?;2#=y^c&>=Zq_MX$3&uS-R*%SEp%MX#$xuXjbS&qc2o4-v^FBKc^Me2PduPb6O;k}np?mx<&nMDn#F`IjR3Mv;80 zNWNDj|3M@_C6b>J$QvC{m_~l<6X6mPnZ^Qs#@4g(79KNLeaUR*00%B4w*csSqiZBBep3G>Js- zGST})(fef4`&7~Ue?;%|MDGhk?X^eGg5iY21Y^`g(6qR-u; z&u>Ma--|wI)ZdEK z`$X!4BK2XB`j|+4T%5nQhv-`^`tBBe_lmS$BCWSbOBHG9BCVfD%NA+5 zB5jaJ8!FO{6=^4kw39?y;-@0*e?;1uBJFIEcCJV}U!+|j(ry=N4~n$^h_neJZHGu} z6Y1$9Jy)cciu5By`l%xQbdf$xq@N?w&ll+ziu8*``eh>h3Xy)RNdJvUze}XwBhr5> z((e=ys>nuiyAp2+%{$ht^mT_v)v6BSp?BBIhEJb6rB@+#+&*FLLe|IS-4R$3)KKBIgkHjL3OT z>pB76Yb<0jtD-Hj#Un$n7n1vqkPuky|KoOGNGwB9VKP z$Q>qf&k?!ji`)xE?!_YaQjvSD$h}VF-Yjx&6}i6=xp#@&2Sn~4MDC*^Hz9JL5xLKa z+*d^I>mv6pk^66vJ6zAMC2|Lxhq8ODv`TZB67Ei-0dQ_QsnLwxy@o= zo*0-f23{lvUL^+pQ4D-i4E$6K93uvf69XrTfm6i5>0;n4F>s+6xJV3KCI2CfzZ zw~K*0#K0OcuucqY6a#mQfo)=7yBK&NA@X{NybO`oPvi{{d3hplh{!7vd8H!nNRfBE 
z$Qvf|&K7xBio9z?-gP4H29bA@$h$@4-7fO(6nXcEyvIe}lOpdak@r`T_cxLEg2;PG zNSYXuDTZW;A;n@ysTgvk z81f@AK$x?~9?M#n4G&=wdN+jTpLLSdWCWwN`qF|aRm@5kAi-HBBV38=O z6a@!G;UH0Xq$vE6C_GUVo+1iQ6NP7p!m~u-&qd)SqVRH2c%>-3MikyD3SSY0ABw^W zB2hS36wVWc3q;`}QMg1DE*FK{MPZ{TY!-zrqOesI^%g}rq9|7s4H88|MNy$BDiKA; zi=q=n(Mh7{WKndkC^}yhT`h`!A&RaSMZXe7zZONeiK071(cKbJ^q44mTogSiik=ch ze-%Z46GbnGqL)O``=V%$C|W6sYDIC1DE^5kzEKoEB#INF_-Rr6tSEk76u&5nUlzq5 ziQ*BWc$6p}D~czG;>n_Tnkb$xihmHrs}3cv6~*PEc#|mJDvIkxaib`15yg8%@jg*} zKolPoB`Koha8c4%lw^pK{-PvDloX4SQc-fGDEYA{IYyKmFG_}qlCwq0MWWNfLN*YB;izwM6N)C!dNtY-M zQJN%5etq?C+xNWl{E;D0@?s keId$5i?X?*Y=J0S@_)a}m2_C{{|j=;|DXT=C(4%nA2nY~F#rGn diff --git a/main.cpp b/main.cpp index 305fdd2d92..205a5473cf 100644 --- a/main.cpp +++ b/main.cpp @@ -89,12 +89,12 @@ Hand myHand(HAND_RADIUS, glm::vec3(0,1,1)); // My hand (used to manipulate things in world) glm::vec3 box(WORLD_SIZE,WORLD_SIZE,WORLD_SIZE); -ParticleSystem balls(10, +ParticleSystem balls(1000, box, false, // Wrap? - 0.0, // Noise - 0.3, // Size scale - 0.0 // Gravity + 0.02, // Noise + 0.3, // Size scale + 0.0 // Gravity ); @@ -112,7 +112,7 @@ ParticleSystem balls(10, #define RENDER_FRAME_MSECS 10 #define SLEEP 0 -#define NUM_TRIS 200000 +#define NUM_TRIS 10 struct { float vertices[NUM_TRIS * 3]; // float normals [NUM_TRIS * 3]; @@ -616,7 +616,7 @@ void display(void) if (display_hand) myHand.render(); - // balls.render(); + balls.render(); // Render the world box render_world_box(); @@ -781,7 +781,7 @@ void idle(void) field_simulate(1.f/FPS); myHead.simulate(1.f/FPS); myHand.simulate(1.f/FPS); - // balls.simulate(1.f/FPS); + balls.simulate(1.f/FPS); if (!step_on) glutPostRedisplay(); last_frame = check; diff --git a/particle.cpp b/particle.cpp index 88ea0c96c7..96727a2dca 100644 --- a/particle.cpp +++ b/particle.cpp @@ -27,7 +27,7 @@ ParticleSystem::ParticleSystem(int num, int i, element; bounds = box; count = num; - wrapBounds = wrap; + wrapBounds = false; noise = noiselevel; gravity = setgravity; scale = setscale; 
@@ -38,6 +38,11 @@ ParticleSystem::ParticleSystem(int num, particles[i].position.y = randFloat()*box.y; particles[i].position.z = randFloat()*box.z; + // Constrain to a small box in center + //particles[i].position.x = randFloat()+box.x/2.0; + //particles[i].position.y = randFloat()+box.y/2.0; + //particles[i].position.z = randFloat()+box.z/2.0; + particles[i].velocity.x = 0; particles[i].velocity.y = 0; particles[i].velocity.z = 0; @@ -45,7 +50,7 @@ ParticleSystem::ParticleSystem(int num, particles[i].parent = 0; particles[i].link *= 0; - element = rand()%NUM_ELEMENTS; + element = 1; //rand()%NUM_ELEMENTS; particles[i].element = element; if (element == 0) particles[i].color = color0; @@ -53,7 +58,7 @@ ParticleSystem::ParticleSystem(int num, else if (element == 2) particles[i].color = color2; else if (element == 3) particles[i].color = color3; - particles[i].radius = radii[element]*scale; + particles[i].radius = 0.10; //radii[element]*scale; particles[i].isColliding = false; @@ -79,10 +84,10 @@ void ParticleSystem::render() { for (unsigned int i = 0; i < count; ++i) { glPushMatrix(); glTranslatef(particles[i].position.x, particles[i].position.y, particles[i].position.z); - if (particles[i].isColliding) glColor3f(particles[i].color.x * 0.7, - particles[i].color.y * 0.7, - particles[i].color.z * 0.7); - else glColor3f(particles[i].color.x, particles[i].color.y, particles[i].color.z); + if (particles[i].numSprung == 0) glColor3f(1,1,1); + else if (particles[i].numSprung == 1) glColor3f(0,1,0); + else if (particles[i].numSprung == 2) glColor3f(1,1,0); + else if (particles[i].numSprung >= 3) glColor3f(1,0,0); glutSolidSphere(particles[i].radius, 15, 15); glPopMatrix(); } @@ -110,20 +115,27 @@ void ParticleSystem::simulate (float deltaTime) { particles[i].velocity.y -= gravity*deltaTime; // Drag: decay velocity - particles[i].velocity *= 0.99; + const float CONSTANT_DAMPING = 0.1; + particles[i].velocity *= (1.f - CONSTANT_DAMPING*deltaTime); // Add velocity from field 
//Field::addTo(particles[i].velocity); //particles[i].velocity += Field::valueAt(particles[i].position); // Add noise - const float RAND_VEL = 3.0; + const float RAND_VEL = 0.05; if (noise) { - if (randFloat() < noise*deltaTime) { + if (1) { particles[i].velocity += glm::vec3((randFloat() - 0.5)*RAND_VEL, (randFloat() - 0.5)*RAND_VEL, (randFloat() - 0.5)*RAND_VEL); } + if (randFloat() < noise*deltaTime) { + particles[i].velocity += glm::vec3((randFloat() - 0.5)*RAND_VEL*100, + (randFloat() - 0.5)*RAND_VEL*100, + (randFloat() - 0.5)*RAND_VEL*100); + + } } } else { particles[i].position = particles[particles[i].parent].position + particles[i].link; @@ -137,11 +149,14 @@ void ParticleSystem::simulate (float deltaTime) { // Check for collision with other balls float separation; const float HARD_SPHERE_FORCE = 100.0; - const float SPRING_FORCE = 0.1; - float spring_length = 3*radii[1]; + const float SPRING_FORCE = 10.0; + const float SPRING_DAMPING = 0.5; + float spring_length = 0.5; //2*radii[1]; + float spring_range = spring_length * 1.2; float contact; particles[i].isColliding = false; + particles[i].numSprung = 0; for (j = 0; j < count; j++) { if ((j != i) && @@ -150,14 +165,17 @@ void ParticleSystem::simulate (float deltaTime) { contact = particles[i].radius + particles[j].radius; // Hard Sphere Scattering + if (separation < contact) { particles[i].velocity += glm::normalize(particles[i].position - particles[j].position)*deltaTime*HARD_SPHERE_FORCE*(contact - separation); particles[i].isColliding = true; } // Spring Action - if ((particles[i].element == 1) && (separation < spring_length*2)) { - particles[i].velocity += glm::normalize(particles[i].position - particles[j].position)*deltaTime*SPRING_FORCE*(spring_length - separation); + if ((particles[i].element == 1) && (separation < spring_range)) { + particles[i].velocity += glm::normalize(particles[i].position - particles[j].position)*deltaTime*SPRING_FORCE*(spring_length - separation); + particles[i].velocity *= 
(1.f - SPRING_DAMPING*deltaTime); + particles[i].numSprung++; } // Link! @@ -194,14 +212,20 @@ void ParticleSystem::simulate (float deltaTime) { // Bounce at bounds if (particles[i].position.x > bounds.x || particles[i].position.x < 0.f) { + if (particles[i].position.x > bounds.x) particles[i].position.x = bounds.x; + else particles[i].position.x = 0.f; particles[i].velocity.x *= -1; } if (particles[i].position.y > bounds.y || particles[i].position.y < 0.f) { + if (particles[i].position.y > bounds.y) particles[i].position.y = bounds.y; + else particles[i].position.y = 0.f; particles[i].velocity.y *= -1; } if (particles[i].position.z > bounds.z || particles[i].position.z < 0.f) { + if (particles[i].position.z > bounds.z) particles[i].position.z = bounds.z; + else particles[i].position.z = 0.f; particles[i].velocity.z *= -1; } } diff --git a/particle.h b/particle.h index 6e55feebbf..3c2a74de30 100644 --- a/particle.h +++ b/particle.h @@ -34,6 +34,7 @@ private: int parent; float radius; bool isColliding; + int numSprung; } *particles; unsigned int count; From c604a88a0735173462857aace578a4aa1515a324 Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Mon, 12 Nov 2012 17:48:28 -0800 Subject: [PATCH 004/136] Texture loading only happens at init() --- main.cpp | 11 ++++++----- texture.cpp | 1 + 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/main.cpp b/main.cpp index 455ae556fe..ecfc6dddcf 100644 --- a/main.cpp +++ b/main.cpp @@ -112,7 +112,7 @@ ParticleSystem balls(1000, #define RENDER_FRAME_MSECS 10 #define SLEEP 0 -#define NUM_TRIS 10 +#define NUM_TRIS 100000 struct { float vertices[NUM_TRIS * 3]; // float normals [NUM_TRIS * 3]; @@ -259,7 +259,10 @@ void initDisplay(void) void init(void) { - int i, j; + int i, j; + + load_png_as_texture(texture_filename); + printf("Texture loaded.\n"); Audio::init(); printf( "Audio started.\n" ); @@ -578,9 +581,7 @@ void display(void) glTranslatef(location[0], location[1], location[2]); /* Draw Point Sprites */ - - 
load_png_as_texture(texture_filename); - + //glActiveTexture(GL_TEXTURE0); glEnable( GL_TEXTURE_2D ); diff --git a/texture.cpp b/texture.cpp index 475047a40d..583d1d9ac7 100644 --- a/texture.cpp +++ b/texture.cpp @@ -34,6 +34,7 @@ int load_png_as_texture(char* filename) unsigned int width = 1, height = 1; unsigned error = lodepng::decode(image, width, height, filename); if (error) { + std::cout << "Error loading texture" << std::endl; return (int) error; } From 1f6134b14beeacac24fd4ce0681b9929d39b6baf Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Mon, 12 Nov 2012 17:48:45 -0800 Subject: [PATCH 005/136] Lock build SDK to 10.7 --- interface.xcodeproj/project.pbxproj | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 7b58f146e6..19f14f81cd 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -55,7 +55,7 @@ /* Begin PBXFileReference section */ 08FB7796FE84155DC02AAC07 /* main.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = main.cpp; sourceTree = ""; }; - 8DD76F6C0486A84900D96B5E /* interface */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = interface; sourceTree = BUILT_PRODUCTS_DIR; }; + 8DD76F6C0486A84900D96B5E /* interface */ = {isa = PBXFileReference; includeInIndex = 0; lastKnownFileType = "compiled.mach-o.executable"; path = interface; sourceTree = BUILT_PRODUCTS_DIR; }; B6BDADD115F4084F002A07DF /* audio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio.h; sourceTree = ""; }; B6BDADD315F4085B002A07DF /* audio.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = audio.cpp; sourceTree = ""; }; B6BDADD515F40B04002A07DF /* libportaudio.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libportaudio.a; 
sourceTree = ""; }; @@ -289,6 +289,7 @@ "$(OTHER_CFLAGS)", ); PRODUCT_NAME = interface; + SDKROOT = macosx10.7; }; name = Debug; }; @@ -312,6 +313,7 @@ "$(OTHER_CFLAGS)", ); PRODUCT_NAME = interface; + SDKROOT = macosx10.7; }; name = Release; }; From 13344f17747e01b0c7210a0cf2e42ce21c408616 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Fri, 16 Nov 2012 16:45:44 -0800 Subject: [PATCH 006/136] Head tweaks, audio toggle --- .DS_Store | Bin 6148 -> 6148 bytes SerialInterface.cpp | 18 +++--- SerialInterface.h | 4 +- hardware/head_hand/head_hand.pde | 42 ++++++++------ head.cpp | 20 ++++++- head.h | 5 ++ .../UserInterfaceState.xcuserstate | Bin 99083 -> 101447 bytes main.cpp | 54 ++++++++++++------ 8 files changed, 95 insertions(+), 48 deletions(-) diff --git a/.DS_Store b/.DS_Store index 361ab67534efb94b6dcd4539d6dc13010ead094e..8a70b493dd03a6d3d3fde862d0b4df0112a17ed6 100644 GIT binary patch delta 213 zcmZoMXfc=|#>B!ku~2NHo+2aH#(>?7i!U%UF>-C@VX9 0) { @@ -72,20 +76,18 @@ int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_chan // At end - Extract value from string to variables if (serial_buffer[0] != 'p') { - sscanf(serial_buffer, "%d %d %d %d %d %d %d %d", /* Needs to match Num Channels */ + sscanf(serial_buffer, "%d %d %d %d %d", /* Needs to match Num Channels */ &adc_channels[0], &adc_channels[1], &adc_channels[2], &adc_channels[3], - &adc_channels[4], - &adc_channels[5], - &adc_channels[6], - &adc_channels[7]); + &adc_channels[4] + ); for (int i = 0; i < NUM_CHANNELS; i++) { if (!first_measurement) - avg_adc_channels[i] = (1.f - AVG_RATE)*avg_adc_channels[i] + - AVG_RATE*(float)adc_channels[i]; + avg_adc_channels[i] = (1.f - AVG_RATE[i])*avg_adc_channels[i] + + AVG_RATE[i]*(float)adc_channels[i]; else { avg_adc_channels[i] = (float)adc_channels[i]; diff --git a/SerialInterface.h b/SerialInterface.h index e2bb82fde1..3fd7a2146e 100644 --- a/SerialInterface.h +++ b/SerialInterface.h @@ -9,7 +9,7 @@ int init_port (int baud); int 
read_sensors(int first_measurement, float * avg_adc_channels, int * adc_channels); -#define NUM_CHANNELS 8 -#define SERIAL_PORT_NAME "/dev/tty.usbmodem411" +#define NUM_CHANNELS 5 +#define SERIAL_PORT_NAME "/dev/tty.usbmodem641" #endif diff --git a/hardware/head_hand/head_hand.pde b/hardware/head_hand/head_hand.pde index 7e4bb60955..2dbdecdb86 100644 --- a/hardware/head_hand/head_hand.pde +++ b/hardware/head_hand/head_hand.pde @@ -4,16 +4,16 @@ Read a set of analog input lines and echo their readings over the serial port wi // ADC PIN MAPPINGS // -// 0, 1 = Head Pitch, Yaw gyro -// 2,3,4 = Head Accelerometer -// 10,11,12 = Hand Accelerometer +// 15,16 = Head Pitch, Yaw gyro +// 17,18,19 = Head Accelerometer -#define NUM_CHANNELS 8 -#define AVERAGE_COUNT 100 -#define TOGGLE_LED_SAMPLES 1000 -int inputPins[NUM_CHANNELS] = {0,1,2,3,4,10,11,12}; +#define NUM_CHANNELS 5 +#define MSECS_PER_SAMPLE 10 +int inputPins[NUM_CHANNELS] = {19,20,15,16,17}; + +unsigned int time; int measured[NUM_CHANNELS]; float accumulate[NUM_CHANNELS]; @@ -29,24 +29,30 @@ void setup() accumulate[i] = measured[i]; } pinMode(BOARD_LED_PIN, OUTPUT); + time = millis(); } void loop() { int i; - sampleCount++; + sampleCount++; for (i = 0; i < NUM_CHANNELS; i++) { - if (sampleCount % AVERAGE_COUNT == 0) { - measured[i] = accumulate[i] / AVERAGE_COUNT; - SerialUSB.print(measured[i]); - SerialUSB.print(" "); - accumulate[i] = 0; - } else { - accumulate[i] += analogRead(inputPins[i]); - } + accumulate[i] += analogRead(inputPins[i]); } - if (sampleCount % AVERAGE_COUNT == 0) SerialUSB.println(""); - if (sampleCount % TOGGLE_LED_SAMPLES == 0) toggleLED(); + if ((millis() - time) >= MSECS_PER_SAMPLE) { + time = millis(); + for (i = 0; i < NUM_CHANNELS; i++) { + measured[i] = accumulate[i] / sampleCount; + SerialUSB.print(measured[i]); + SerialUSB.print(" "); + accumulate[i] = 0; + } + //SerialUSB.print("("); + //SerialUSB.print(sampleCount); + //SerialUSB.print(")"); + SerialUSB.println(""); + sampleCount = 
0; + } } diff --git a/head.cpp b/head.cpp index bf1478db02..bb0e71d010 100644 --- a/head.cpp +++ b/head.cpp @@ -43,6 +43,8 @@ Head::Head() PitchTarget = YawTarget = 0; NoiseEnvelope = 1.0; PupilConverge = 2.1; + leanForward = 0.0; + leanSideways = 0.0; setNoise(0); } @@ -51,10 +53,22 @@ void Head::reset() position = glm::vec3(0,0,0); Pitch = 0; Yaw = 0; + leanForward = leanSideways = 0; +} + +// Read the sensors +void readSensors() +{ + +} + +void Head::addLean(float x, float z) { + // Add Body lean as impulse + leanSideways += x; + leanForward += z; } // Simulate the head over time - void Head::simulate(float deltaTime) { if (!noise) @@ -71,6 +85,9 @@ void Head::simulate(float deltaTime) Roll *= (1.f - DECAY*deltaTime); } + leanForward *= (1.f - DECAY*30.f*deltaTime); + leanSideways *= (1.f - DECAY*30.f*deltaTime); + if (noise) { Pitch += (randFloat() - 0.5)*0.05*NoiseEnvelope; @@ -118,6 +135,7 @@ void Head::render() glPushMatrix(); glLoadIdentity(); glTranslatef(0.f, 0.f, -7.f); + glTranslatef(leanSideways, 0.f, leanForward); glRotatef(Yaw/2.0, 0, 1, 0); glRotatef(Pitch/2.0, 1, 0, 0); glRotatef(Roll/2.0, 0, 0, 1); diff --git a/head.h b/head.h index eef04f2eb3..be228137ec 100644 --- a/head.h +++ b/head.h @@ -34,6 +34,8 @@ class Head { float MouthYaw; float MouthWidth; float MouthHeight; + float leanForward; + float leanSideways; float PitchTarget; float YawTarget; @@ -44,6 +46,8 @@ class Head { glm::vec3 position; + void readSensors(); + public: Head(void); void reset(); @@ -52,6 +56,7 @@ public: void setYaw(float y) {Yaw = y; } void addPitch(float p) {Pitch -= p; } void addYaw(float y){Yaw -= y; } + void addLean(float x, float z); void getPitch(float); void render(); void simulate(float); diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 
7f7bb244fffdb9c9c784aa06962122e08f953684..6c983b0f1039261b38ae49102b70dda728b186b1 100644 GIT binary patch delta 40200 zcmb5V2Y3|K_dfp4WM(Nlv(sl1(mU*C(|aQ!2@rbk5JD0lQUWA&aQBKJDoD9%P-%fs zL`4tk`@AFG|p3Izk&U@c;&pEg4a_chNOY3Z_4bIwZ zwX6y0!pvM#wl%a1lfuhOes^&^k8~3{h0ogVHPtX`#g-uGM(tkpi*Z0 zzQaMCS&x01eTKc!KF2=azR2#e-(X*AUv9tEew%%b{dRk^{cigP`)2!A`*!=i_WSJ* z+8?q%Vt>^Bg#BszA^Y?8m+Y_F->@ICzimHmKWYEKe#-u_{WJTQ_OI>d?cdseu>WlT z&Hje~AxN+Z9Rxv;1XVDEU?Eh95Tb=RAyM!+1-FnYWC+EEKL6h;XBDlW>c$Qdlie;ZET$VZE?P*dlBb zb_(|iyM?{N!@>dKap5W9pzxgVqVS6Fx^P%{OL#{(A-peqB%BdG6~6EYXN7aZMd5ql zC*fD&lJJ+vioDofw2KZ=5p~fLJBi_9lo%@}h+V`aF-1%lv&3AnKr9wZ#d5KS*h}my z4iE>6!^Dx|7_my6Al8YK#VO)+ai-WL&J`Dki$$-vL|i7`EUpk&iEG6>#C765;zqGW zY!xeZh`YoG#69A};sNmqhv5iuggK%dv5rKC(~;~*b7VPk9fgjrjxtAgM=wWT$3Vvr z#|Xzr$5_WW#{@@>W0GUCV=8^DU5zJ13X`IwSSe9*O36~1lqKa#hchaxGmqA1}?gc7MlDbY%d604*rsY;rXt`sX>l@g^?>8tcp`YQvJG0IqF zoKmGsQKl->l<6L2fwE9pq%2mhS3HVWA<9bSHf5EvT3N4bP&O)?l>3zXl?RmF%Hzrt z%9F}d$}7sN%4^E&$_eG9@}Ba!@`duH@`Lh=^1Jev%BnWCgDR@BYO0nRsz$3ZYOLB> zO;%IX3^iBHQ}fjlwOp-Kd#SzEf$AXDGfW+=j#0;|Gu7GZ9Cd-ZQ1z%@^+t86db4_q zdYigRrRweKI`uAfle$@LRqt2#s*k9Ts|VHR)mPOw)VI}R>IwBD^^E$3dRD!t{-FM* z{-FhF?KDwSHB;-PMQPDmqSjeU($cjotw1Z(inI!?yVgVNsa1Nk!P*dQs5VR+t4-1- zYxUZ7+7xZ7HcgwW&C}*<3$z=xrP?y>X6-KRZtWgzy|zKysBO`Bdslm3`#}3t`%L>>`$GFtyQqDueW!h|{jOco{?Pu^ZF)Ps zy)NsD?ooA357oo;a6Mj6&=d7cJy$Q#yXxKa?s}!(S0A9)>UH`=eUd&|uh*~Br|47l zY5GikslH6VNnftttly&Fs;|&j>bL2)>v!mP>s$1#daJ%o->&c0AJm`IpVwc|U({dH zU)EpIU)5jJKh!_cPwA)iGy2E+Cm#J%{R{o9eo6mB|5N|VfWa875oB;iJHu{>hH7X= z7sF||j3mQtBpWG4s*z@-8yQBnk#7_krABw7pV8kKU<@=y8g<4*W0Eo1s5hn=^NhuY z*C56X#u8(hakD{<+l@Ok#kkYhVr(^9jcvwu<9@^QfU(zj$avIv(s%oR+|&dIp$n*o;lxKU@kNlnTyTq%^S_7=B?%m^G=W1Y_2o! 
zGB=nHntRN>=0oN_^I`K5bH91OJY+s=zHGj39x>lGkDKqB@0%Z)ADf?>-@vRw$LKhjn+-pE!M5pD(iM@owc4G5=xo*G)TNNWGvyM2_HxJE`;w%_)5ah zAVHT%(4~D3i9;Cfb;6z7*HL<;y{LN!ct?B3ddGUldB@R#8#~kQt$OO+&jWM(mTp$GZB** z7mY}YW|?GKniR`2X>?pj0+UI-vr<_mhn|i}V3~ZnCO?j4isL&49Lpff}O0ws;tK9tihVB#RhvfcsF`Cc{h7oyj#3my{(r!V?*hta%U?1d|xWq1oV@B2A+SWqyd zvv>G-vg?rG9p`<;&+}F9SUP>uYI-p-fp(5b*jL(lPG*sEUS^tbH|Q zlKZfBO8X~gvGzNuw5=<17wx~z&8(-3x1}?i=$>uK%ociTTOPBGu1-m1?K^4j?QzU~ zR7nk^E4GWwZmOhqX6<`v@r*>){xChaJ&QR&H_ULe_Q&b#Gm=>QQ`Ea7gE>f(XC|=r z=VXMK+JB`aGlR8XqPI0gvi85|gH5@tz)~g4#R@zv_Wfy3 zr)0&jf}O6)N@oQJjlMsIQRvgNqgX+wr)S5qf<@igX{^wRj?a!^g>X7%j*}Il=$>ph zE5y=M*-lnSp!S?tR_H=gbK+SciKfhTvqB0TH#eRY(&+;^PFBdGd*`OHLM|0@liCRd zK1ai;l55fAc_J$m)1rAftWZif=DJy-oW7CUg%x`EQ(EXnqw?}up)YNjA4>br*I8iz zjmit9-uaQNFqpQ?PiZF%^WjC&>O7Nr=Xa!$`R!O?c1 zsjN^(5Bb_A)9`{!R+vKf?sYQLFHc@k%uK30l+HBKrxzx(!d!ZOVG1iOpplDQtgx8& zS=5siy!7e9o~*Eho?eu}3d?9wk*l3>vyVe0je1z8qZg~Ju!1%$j$?&Y^r@m)R#;0* z9!_KKpofb?sJmEUg>|%~IE@wVp+_HfGaG43aS|)E(8I;fc0#KUDVlz}xD)Nv)xioo zXv+(c)Vsu3yM(6&9-S3-(Se>Jtnh%ZzNvSKNk8xytgwfk_h!=mCDE*~j~?0Ii`h@d zE%zJJ{s-ci$LM+QP*!;Is_?G^am+JxpeJ?h0gnDw8pR6F(nE(L=?srT`*f37;RQOg zCSZTzQG8X5qBkpiLf2Gup{JMIXleydPe0*cKBsFcN?GA6 z`sZ>tt$NaAzM&VFRJ$y9l6Ad^PV4Cq2j4%=y7-*8sUq-O@C(_8vRvSN-eWNQa%^uxX)E9TR0 z`i^47BC0%>P8alxV8s&p(Yk=;oBbkLv5d}mJ)L@9h-SLemKXXlmGt5ZBbh#J3x&#x z{i(dV4=WC$<5rJk#i8`ffPt(yg7zJfPFJsX&^~J%tT>vESyRP|wCv6hlS#jH4q9vU=|6|bY;tR2OQ)99KCpVit|Wo8CdULC|V zu3eW-<=Zt@oJ0E#>Boxm>A2Ma+9|IkFpKDs+b6Q3hmIdQffa9{@@hBj|9TX&l+Jj4 zFteP>tJCR>J3F!Bt^UWSeUG=%nKhZz(X6xL8alHkkB)7YSn>9?>+w2@|R{GJp!K}EQp7;8@da0#K*mbn>V)!o)W+E z7CtDu=vPPjbQC`oKNCL}zYxEqSx0B7BttEz@dTUT{ zd_dmXChy{_zX5d3$rL(Hv(wcdb)YNWwzQ;x;`TPhRQkpTY4qYT8$I{7sAUA&ceb_X zQ1Mt#Ek7XN)g~|Y+dcVVd%EDbT`LW=-{00=L7zF+Ex2buzPnA{$44xO&ODJsE8a1* z{sDzOZ3=_wk$1Y%h_`K?$!$Y2ETHy~7gch_JomhshT1xJZGB^PV`JU42~+3ir=&DC zRo6_ZnmDU^T3y!}vl?pZ8XGIii_)@kvlE&o&#J4gtt>CfPVl!^mKUYAVUG@AKm1Q@ z&;Jdcot622fR7J=@Bb(G|BapFNz3^^!0Q6wkN!V{r>Ev-wPEM{+o)WJ0sbs}{Qns| 
zEiEIp4g7CIPVasc|N6$cMqrLpm}gwCJN?X81K_w*G?%`rtR;fTLl!~Nc67&-Z`k4g%LV|v6l}e;ixTP`@^cxBK-G3|a z2VHY^!HE4shaFwLz?ZgnJLYZ8l@(}*$F2QP9&V8RhlEsgUtxex0`X{oeKx`}Wa;dH_ogfm;Eo26UemR1nXB3!T^ zE5wIISAG(ca;J2U&#qZoC*398O}J3Pg%K{Ca1pK2dTB$z-$=qmUG;ZC<^Exxce`%o zoDmCClcKV6vi>%DJEaHy)1%$egK$fG2^T}SSU*}EZTU1l<$(0$e=>hcdKzx&AmI`S zm*}_YOn>~;new9an$PB6qx8D;2HetN!Z``&^4lcQexJpA-jUw>Pfy>MJ_vZ4Ot_SQ zr?I8`hYR;?eSe(a&5X>S{ z0$7=Z%WA`N+|Ym3h<}0lOXhq~|5g|>?~!c*tQ^ASUd0-mdSbw$lYXqUoU}FiD zi4d8wTzA6tAY9K@IaAJpTh1X|CE#i4NflRiisyD%*;BO^5_DlH>BXYr_o>8Yt{8Q07! za*aIM2mNmuE7!}{!7WcEc5Wo$M)|Qv)1N+z^T@O0x&KLMo;)9Jc_HD(5^kK|sLDr3 zx^G5&yD2sedrQ(NV`<$K_kHxOHw$?z=)**R@RtZ+()|H4x`Dt5RPk#H*rx6;q?HXq#CzxFOYJ$hkAYGzVY zT3T-E;t>mTb26`?sc1@YlDDu`(G^266^n4I3AcuDYY9hNl@O&i$!db-cOv|Qh~<>uvGODI#x z^ZEI23#pQ?6u_+%5$+yR#jW?_Zip@2-*aqtZ}-yC3)3?4lA_X5^U@cOSeTc6ZP6-a zN>3l|zZI=gsq}(d=|i|pgxl`->X zt?VM)6NG!xZ}XHdKR;g@`CvDH4W;MhE*`NkJ@+5Eut$01KY8p|4#2HEM!07PcaQ{e zmk4)=R)3r5c}98uKZ(4cya>1QGU1*h-1B}SFZlNcD|>!Mee-O#uToO8vwibyR{B3m zvZ zUow2He50IG&J*qp!W|~uQNq35s$5jQRlZZcC)_)P`+#sC6Yh(v=S<}%|7neT^Xg7p z`HfEdVQq;2(69UvXgzYZRb}Y(?^o$67if5^t)V?te(0&I_CUk2wg!n#`(Y4$`Nu-~ z`wtydRaJFwVT-B}?s$u85bj;N_~+QJYKXV0MeRhm6UT5C9q^4BriQB#Y9tb>WV5oo zV&Lrhnkju68m0`Wo>bRw_OuCgvk3Pd;oc|QN$*(N@WTpv`j-wJ-oka5qQtUUCi%K`5$?kl)lImMeBCCfX})gL33sZk+e|e=%|-&@&iK(!d&kll z-`_|_{;YOW3)SK_@?8n{iSI7Nj?dL@KAbYbecFaop+=}Zd^n%`aX#~ot&;QoN2MQs zj#AaWYJVS8Kf-<4q7ES3R{=spd_Y49ceV{^xEi63^Z|Y22m0FY?g0Aoj}@M)2N$(U zo$M{#sg75x)d^~iTC3Kn6V*wCJ5RWag!_(gKM?LG!u>+H-w1bUr&_OGr%q9)s?*fz zYJ)n1aDNh>CwvFOI|wfmUMGAf!iQZ=6tyuB*bBasclzba4!YsjcGUA*a{GCK#&7AG zUp4yjZ=D_f^idZF#NYeHqkm0qe?vge;^~ zw_g(w{O%KEcT3OimYPwP=FZMY>+a4h&CPP>rKM%MyQk--XJwaV!1NEF zY1i)MrRf=&4SBD0=wf3>I^ z2@hXnBGoOv$ZRD%^A66c+tls8EJdx6z-}^l@=u!1C`pfSH zo+s6(+c-T#c-xi4d)CM5Il{O5C##p#UFtk9+7rILpVYU1C-pkvgJfHq%ZJsYKKLVq z@7SWgMR#Jj1U!SO-wYl;+;iW6CeC4fbyFyjJQO~!jT_C*TxuW--kH+_eSN}=lXLXl)lb?p> zr}6XOX_ZysC>u(6@)(-wiYnB!~w%_2svzw|X zOs(@-ZPhF-1P#Ob4fefVC9El7T7UFw~FccT36{PDNp(lUL+CA5L(Xn8*HT*Ak+X!(SXr9=LV 
z4%517-Moccv=Xh9@bQFC*rJtb<%CZpd}q4m&*}uNx7OcVxK-<;_0{?j-br{D;ghy% z1GItKAi}!|-<|M1Xz*Xtduk)R1+CghZIm`z8$(wFqT*RZU5V*(!iPNa9aGPiYPV`@y@lJg@!Cr5Hf@5o zT3h4iSWNh?gij-U3E@k(Yn0KoJG47}-{T;BH}5#Yml3|g_Z5}qLqSY*^HV|0a?d7o z&^Bu=zIo|}^1fcsGjA2qS;tko_G&=Z-wP@otLUH|4!jR+e%;3O zX@4y6Hn=&e9n-t{X&YnfcrqXw>Idl+?fVQy&_2@6cyqUCr?k_AA5QoYTeOe0PY6Gf z@S~bnv}3wNpViL!6u$N;jPWa+*Der#EaAruom*eqG`aamJ0>jpN9`A%+D|^U@qV>m zwciL|P523IYN25|^q_0?bWZ09UrYEpA5dtF-cc95xm$F*4{4GQNq6WH;U^Qm{%=Sg z-Oz)tC8meyod`dL@Ka+;Zd~opy4aGNX80ejgig>S^_YK{#p-c{pHBFO*peF#UN*b@ za5)3?F5ZG&x>I+FuZS;;FX<`b$x=N{og}_S_?d)nBtegmpnW80FX3mCpmzvAmvEOz z&|_Bvt7iwse%96fou1!JY)m&@FAB6YwY8M`TGHr-6N;{v2U_Q}wf3Z|PxaOH-hqaB zZ4LeDg_G&E{tNFb-kSO#eYCf5r#@I8q7T)F>BIFA`bd2g;TI5oA>kJhelg*%C%lL7 zUc!@|aO-3Bad1P@Mdgeo}Q)!>rMjo5wI$qRaZS?T6O*Oi7MR%>#a$=Gc;kYqd>%ul~4i5FgU_=@09V===2p`lI?|gujdMcN6{|!mlU% z2EuP7{3gP0-l;#KKdC>ZKdnEbAJh+Z(w`-K3*ql0{G)__jPM^2{tV&2A+~nS5ur?{ z<};m`uY2on=%NT;9GM(m(eWY`#*CUwI4KwmkYb`VT&a=k)XX1^uG_t^S?l;s z`~!sFP51{1zlZR93IEUzB*2Z=^k4K}k)Z$X9qY^LY{Ks&{KMX{gx_B!$5xhCbgOQv zncTg8daW;4njI00t$9f}^Tz-l9hiQGZA`zxgQ}-bsw-=lJ-vzWkN8&>qrK5#)a8B5 z;JT(QMn^RK?foqyU@Q>+KtSJNNXOxX%&( zdBVTwe}0MZ@A)6zZ(bM49QCB8T`o@})5s$H3!4M9a*W(DSGQGz>zcM2IYu5DhV~mg zxUR`R{6?lx7|{2B5!C4FEod=H2>-^_axi)ty}h|xj7p;y z;a~L-{*wcYGzNXo8IK<(@<00 zRNpYYab)A{3H6PQv+Ej%G|it;H+tgisZ;ybH8xgHy1f2>ZcOo3wHQ+gf9#|&-DtpB zV}>yk9gJD{#%MH}n0}}=nv5o6j?v_Ms*;tO$SIKJK z^xFFAlgb*V&Ym_sa5}G&^~R>^Sxx^VRVsf zDaL$bL6vNk4;wP1ZeEk`U@|SVZhF&|A=qjxG#2?bLzgvZztF@hWoLN21)Hnnh|(D| zrq)IEt*)8u?jO~se)^QUSy7#*)^>@im^G_mR+XF*)3dI+Hb1|*wzf}Q_4LjYr#4hK zB}MuFbcsodnpHQkZdTp&n!4JkiL)A}%8zg}5hQPwbZYW0l9x?10?G-Os? 
z-IZ(;jGe|VZ*GfmFX6vvG43P$mjR4n)l+BJHI`nZY#I+5dz#;gXJS2q@vyPK&F%o< zzrF%#8jl-KT=NYW<7wkXZ{ZH(8RMXF$avOx&UoHIJu8oG{+=5jsiu zpIVIf3IFr|pChM?Gv2}$<22!aX)!(~{I6~M<2FA&H@@^KeL?u&T8ys<|NF4P4YOy} zT+#5rG|m|pd@AP&f2qZ|NccalF0ccx;0VT##?Nicej)r{SDH-YcjJ#X9)A+s)h(TA zGN#QJ`Y()&CTH@5|Bdi}5F0~mY^!n6_vJGF5Nk7UQC+_|>^B zKFxs6u=;7$lj<6;bS{{|W~ZyX%ur%$y8|#I%*bm(XvUZc-omYBtQlv<6I**?>p*NB zx5909HoN%aVkb7SN^UNQXBI`Im}ysY!%Qc(wxgq&WoG*_D1KnRS$K8ZVHOeF<&*y* zv&37l)hso;nPtQ#6PreCW*g=1X63az3A2~ko7fa$Q=6+DjMh9Zfzd(-n1lbJHN+fB zY&x+S%_|a^=+IH-*ncRDGpmTrBDUb>mlK$9$5yTcrY*EA(y%TP;0dYt$XPJ#=lR4WzHZfk=JC@igiET8ojUl$$ zDp~ez+sf-E%$_u<&c8DY+-wGRYJHpYBxY8^sHy&K-rwuhG`;2x-oh4>5L;}ExrEr_ znt#)nvr8B2wwe?P z<{iZ5^zrM_JVR${s?B@6RW0UvVsl-wHqA}u4sYQObF!j!JFz7Zo155@ zi7kcLQi&~%*wS~HJI#B|UFLnpdGi5tH?d_9TPCrU5L-FnFA-Y>v2}0GG?@ET^HK9L z^Ko=ApD>>^pE92|pK0DMGebmMA+co>TNbf(^^R?Br!aXr=JVdFt>z2ni{?wjmP2fL z#FqaSI+(ARuNvn^Ro2!`Z>pbIUpI@`atVLQzdD$2m~S@kRG7%1E#~3o?FtiD=2!p1 zuk+hwYx5o7tF7Sg)$hM?My+|$e9sGFD<(Goe-D|6N#=*XGfZCKUfDclo~BiyCcniz zg9hKzW%(!Or_HykOcG~)h6dl0>*~oxFuyd!+oTVg9EHvh-pwz9yd z%Jt&6|M{u%*xLHW8B?q0cb?NwU)y}nWHLPFd;U|DttfDcvg32}4_`$8B(|PavhF|B zr25V@Ik}lhQQ6r!ZL_Fpu~vI;;WjJC;w;{>S?!3e7qRswwm!txm)QDkGgez2Ejy!I zBH>RHTYq93=p9FFgR5jIa46~CyvbsED3)Pa-oh5kB(?!9jP4!R{IkU*C{~!|ySK8! 
ziEU5|61-!ZlY^OXj}>eAuB!rv5G#S$hFsovSzWvZTP&yLBDSH#HsVS`3Rbd}c5MY) z=~f1@4I{SUV~17Gs`p)>H4dw+6|s#UdG)LoTe4vDHC+qVXlvX*46Cg1#5RH0Y62_W-)EE9lIekC)iJBqszV2B zqBRM%R=ssy^I?hU9c`=g<@O9>n|9oqYE84ITMg)7&9G)N{jBSl{#Ijifz0&t*d`L& zG-7M;;+?i~wdPoJedUS-Vw*~AldjZi!1K?o#onr|*7cUh@)Fx*Vyh>%>#i2-l?n~i z?zP2e-Rv*MDQ(3#^zDDE#%ma(*0)n-`dN(@^}^>=1F=oNLgh|xRf{!=*y{Xqf_0a5 zca@yjyhdYUg?sD~0pj(5 zxD5~=0>p;_@ijnv0}u}b;!!|+8xY?C#CHMlBp|*Ih#vytDL^~}h`#{hZ-96S9*BPe z4hC>=fTJDY=m zPyQN4?z11(7V9{y+5E21oSb0J`T{w1NsC&uLbmpfIbt@8v%Vbpw9*L`GCF< z(C-8E)>6Q_39wcH)}4U$1YkV}Sf>E% zOTan{c&u}Pbpfz00oI>DupI~%fnW&;R)Am)2sVIV3kZ$_f)jw?&Oope2u=cmlY!t= zAb1!MJPQcE3kZG&2tEaba6pI%gh)V091xNKgmeZ%oIpqt5RwcJLQ;W{bRZ-X2+0OQ za)FRiAfyZk84ZNg0wEKD5HApNGZ1ns5V8XZ*#m?;1caOfLOupUJ_R~4K&OsCCjsb` z2y{vXI;8`hW&oY$1DzHEot^|by#RE22?(`$fKUktRe;bWAT$dI%>hF5fzU!Av;qk2 z0fbfqp*28g9S}Ml2yFyH7XhK`fzYi$=yO2mNg(udAoNQh^eZ6rED-uL5c(?+#sXm+ z5M~3y+5=%7fiMjS^B6#w1%!nFVWB`+Zy;0Yg!cx*`vT$pf$*_F_zWO?KHv#o z281sM!dC*}tAOw|K=_?N_&Ol`ZXkRs5Pm-pz8eVN3xw|j!k-7iUj)Km2Etzj!e0j> zB7um`KtvW0kpn~&0ujYPL;xkA0TGV?5eI;X$AE|@fQYAnh;u+>Fc8@Th@1mNZU-Wt z1tJdvkw<~ZV?g9_Ao3&-`92VN0f_tsi2Th1ME(UtF+fxh5G4aqDiCD>Q6WH7C=e9| zM8yD6aX?f$5LE$0^#h_t15smvs0lz+Ef6&kh?)XKO#`ADfT(#u)a^jj1|Vt^5VZw} zY6YSm0iq57QI7#pzWR@P3W(-`Xa^7-3Pgtk(Q!a@0ubF9h;{1kK=eK!`Vk=d01*8c5d8!Y?d$VZz!UvC z5d9_)eFTVp3y3}jL>~vDF99*ZKulL4rVfZ%3dHOJV)g+sj{q?*0x>TGF|PtKuLCh} z0x?H`n74qKV?fMtAm$Vha|Vd{1BkT&vF(A_6d*Pih|LFLhr@%|@j&bZAa*ejy9|h3 z4#YkL#6AJUJ_W@70mRvWxb{F?7a%Sjh|2`x`U7zzfVfdW+-M+fG7xti5H|&gn+n9u z2jUh2am#?Xx-?lvH9H4t|X5Vyet#BBoNT7bB%K-_j9?l=(70P(3nd=(JC z3W&cCh<_A_e;kN^3W$FOh<_G{e;$ZG0mPpH;y(f6zXamX0`cDf@xKD`zXJ&X5?COC z0}?s_33ebs1QJ4l1Wz)MkOL%?0}0)Mgx)|xUm&4BkT3*D7zQMa01~Q!gvCI@G9Y0& zkZ>!Iuo6hv2qbI<61D&dtw6$dAmK+K;ZGpZ4kU^|q7EdQKw>bE*a=9C1`=a|#CX7y zmydU&N9GR0XTaA&Pu@98*ugooc#gkK)^W|a1I5W!vW_=z&RRljs=`m zfU_EK)&R~rz&QzU)&tHdfO8t)Yyg}yJ%F*4UoWBCjOMvq)z{LVC9&oh>Tz0_a09*>-(gBal0$iN{S2*B` z0$j0xD*IJy^0I0N~mKxb^|A{ebH+!1W~HdIoSk3%FhYTrUHz 
z*8ta>fa@sWI_3df?*gv(0N00r>onl{1aN&0xV{2h-vF))fa^QJ^&{Z=1#tZixc&r^ z7$AuQlG*`D9f2efNRojh4M;M9q!1t}3`mLul45|Qcp#}WkmLfAl7XZ&Ajy*nB;^1} z`9M+;kW>OBl>tfJfuu?xsSl7e6i6BlB+UnsZUB;Q1d`SRN!x&=9YE3{An8>g>2)CK zJ0R(IAn6al9SXSP0Cxi5?gqGf1Ma?nyB=`Q0^Ci2$GskKZv)&r0Qbv)`zYXk8*qOM zxPJrOmw@C%AUPFCP6v|50LgVg@+2U67m&OUNPYxJJ_9751(Lr3QlfzrCyk11Ca6)kQxr8CIG3Ofz)n5YHuL5FOWJF zNSzI&&IMBM1X4EvsVzY2GeGLgKW@I`pFkRbv^XHm4Wy+2X@h{Y(LmZ*AZ;0t zwhBmF1ElTt0BHw+w8wz7<3QReAngp0&I0LnAYBB~Kvp{7$tnP{ih!)CK-O#^Yc7y=7m&3D z$Z7?$UIelZ16fCbtg}GY_dwQod{%i0kTVh>|Q{2A0WF1$es#hPY1G>1KDeU zYzkyQ0c1Z9WWNYxp8~SK1hUVB3FO=gJo) zW+3+kQm0je+iJk0?5A&$Ugw&9|ZEB1@b-L0Qo-x z`9A{%Do_v#6odl>ASprAicFb^moK*18AU^h^304R72C^!ied<+zP3KTkkLJKGi z0SZfj!d^gOAE0n1P`CgnTm%$u0SfN}3LgLp4+DiK08in2Kv4)#6ay5+0Yx=H(Nv&l zI#9F@C~5(UwgN>j07Y*CMMr?5?|`D;fucWv;$WaS8Yqqhii?2a?m%%*ptu1jo(B{! z0E+Jcid%u=?LhIHK=Hdk@kyZAcmLCs2fDTcx;lZb89>)8pzAQ8YZcJ78tA$d=z1H_ zbv4lS7|``YpzA51#0ivS03}&K$sC}>1C$V;WX zqkytIfU=E1*=C^ZQ=se|P<8<*&jHFyfbwoYxfdwE87RLMD1QwoKL(T^2P!n6!V?Bm zL;w|IfQmYxViHhsFHrFiQ1LKO@eNS%BT(@(&^-m{o(pu(2f8-`-4_AfuLrt64Rn7A z=>7`OBM9gr06iQ)k0C&hu|SV1pvPvQ$Gt$0`+y#w13fMPJ-!7zJ-Y%udjdUs0X%m*sVfXWJ>k^q&r0F^6%$`^pjH-X9{K(8R6mjLu~0KF=K zUW0&MLx5h*K(Ea}uPs2Y_kdoX0KGl~dUpnTr@@2X89?twp!Xu6_w_*Ur~f|+?K3(H zBI^VAZxVu15+Y4(t7~t&uD;gQb=S784G|QT5J+gjJfZjc03wQ_q9`H;%oBR=pa_D9 zC?aA4Nbe;H1V{)89p1x-x#!HS^I_(k|DFHkg`at0H7{J`#mjlIDlazT#iqR2j29>J z;w)aA!%IoLbR{oc#Y<_t^cF8gcxfIlE#al_sb87;HK>0D^`E7F1L`-V{ z^~HG<=SRFVOH48jhx6HVwDa@Bj@9Xml-& zZlTd_H0nU3o;2!Bqct?zK%+l-HHlZRuG#Djqjv!I~sSVaZeimK;vI&{2Q;=;PraEel@SB@_HLy zZ_DeGczrgn&*k+}UO&U@=Xm2`-gtsHp5l$Sd1DxFd?f0PUwLCaZ*1hv+PrxkZ(h%v z>AacAn_0ZMkT;j{<_|PUq)A$qbszqRDKU%%REmG+9oQH8fdE zlXW!7qse-j{7I8NQJUp;rI?gsG)2>RnkLZna++47>FYF2qiH*ucA#l@n)ae;ADZ^3={q!ikES2c^dp*% zr0I7w{fVY~XnN`YuW2z&FBLdW(~G1AQj5W~8Q*+KSW+QZq?yM`}k>yNF8dOKN{oN0RzAsWDQ& zBXuRIt4Q5T>S0ok(5yDiuA|xYG<%F@&(Q2ynzf)=gl6q%HjQTUX||ANCuxr6aWsE| z=FijoMVe>Oyd%v!M`_-L<^yOxh~{t8d@#*Fq4@}!kEMAI&2wo!p5_y2K85D> 
zq`gjBQ_`A|mQGqGX+22mMOuH-29P$0w0B7xO4=~eJ|}GsX^Tl)LfSIYR*<%yv_DDP zOxiZmc9Q-#(jO%KDWd7mkp3d+^+|6)dLz=Cke*6<8tE-be~?X{FN3r z)8aN-+)0bS(c&Mp_&-`aNQ*~k@fa`n&Godooi-29<{_eO9;3~_Y4ap)o~6xmw0V&>uhFIxZF8d8QMCDrHe+dXj5eppNFd`fGLpzhCZiS^b;-Dnj2pmW)5h$S31C zZIfvG0BuufJCL>$X*-Lyb7(u4w%^fqX_U+-$$W;)|B?9unf1whh0IsUe4WfDWTujt zMrKPg+mQJdnOS6ZAhR=>-N@`oW*;*9lR1#g!DPNm=KExRNak=dN02#^%rD8zCNugq znYm<6AagRA)5x4j<{UETlevh@#bhoeb2*tmlDUe^)nxug<~lO-$=pchW-_;txs%L2 zWbP-kfXqW=9wD=Y%rY`7$UIHvIii^tiNp~}AaXg8YDAKVTtTEZkt>N@P2@TvHxRjr z$gM=~AaXa6dx+deJ&5!s(vQdhB5xBJLS!hBVMIPA@+pzeiHsuh6_K$-VnoIhnM7nN zkr_m06PZV3A(3y1EQu0XMq~w%pNRZIWDSwuiR2O4Kx7kKN z>t3?%C+i`y{z=xq$odaiPm%RsvYsRBMY3Kdt07shk@W^yDP%PxE1j%XWMz<*Nme_u zI+E3etnOr8;(xOGUOEL?gUEV^tdGbVPS#wqz9VZXSsTgPN!D(%&d@HNb_uk*i+1G&lbbLlvqjw|T6hK_6Lc#2MePVscQ zjZXid(|vS$jZV$zlt!nabo!J|pV4U!ofgw+37xjkX)m4j(>a08Npy}T)A=?!|AWr= z(YZdIU#Ih%bRJ0O_vt*0&hzQKl+MfOyp7J6k{qNa5xttys~NpMrq?KXji%Rfdab6{8hV|hcLKdHqxawF{UE&`rgsXxThY4>y+5Y+ zD0+{k_ey%NrT02|AEI{|z02uyEq!jG&u#Q+K%X~>_DP}75c+&fpHJvBhdztxvxGhe z>2r)erS!duzBkhMX8Jxy--h&kmA<{{`!;>wq3qx&|^y@>vF{1iSq~B!v{Yt;}^xH_k68fE{-&y+Cq5og#e*^uWrGEqZH>7_T`uC-O zfBH|P|1A2?q5lT@@1XxKqH#p45l!S0(-ysicpf2|dI|XuZ5t&zo#+BCQ85GJ7*LG? 
zi43UEfY%xDCIcogU?u}*GoXY4rx|dTfzL6pAp>7!U=9PPFmM_JPctZvL6sQvGK1b= zP!k3XXV8}n`ienY85G^epaZ;p8*l%Ex9{WacD&u4w|nyT65jraw|{1E4F=a^@YM`{ zgTd(xZpq+r44%&5nG8P6;1dk4;GO$<=U=?@Z{8WmJMZ((Fy2|oJ8OAo9nm3|F(jEG zH5u{(LtbS_V}^`m$kz;sF=QJ<_A}%l@7~S35Ag0oyxWs^2lDRQy!$=x{>;0pdGAWz zyMgy^)}LYTGHfWr4l?W*!%F$^VLp6<51-=05BTsiJ{-x1JNdAH4~zKd zdOo_HkM887K72HokB0EkT0Z)dk2dr1jeL9;AODSyyYq4M(i+IeKk@PJeEbK)uV?t} z48N1%5r%hVcz1?>%kUKpU&$x6`Q$o2xt>ol_@pDBbmo&qe6pNRR`6*(KK(17-o&SG z@o8s1?aHU~`E)6tF5}Y^L_a;xrxzLVEF&5)q9G&3F=9F+W-{U+BaShml+PaFvnToN zX+9gkXG8hy13vqM&$jT{Ha@?L&+q5+2l;#ipJ(&=SUx|=NQ{hQS z8u3+h5MRyVt37;mp6se*S0}p$*)_?oLv}r~?G*>lLAPxex>w~)P;?EPdHkbQ{k zvt(aj493JWrZQumV@yNFG-pgYW7;w%lQHcW(~&Vf7}J|E{TMTVG4C>FI%9reOg>|_ zF=mITF}oSFk1-{TInJ1ijE!S#0%I>{Y&FIvG4=|^{)MrBW$ew2y_K=IGxko#-p$yC zjBU@@&l&q2W4G}&zOKU8)yNr6&IocwlJg}w+2nj3B`24h3FJ&BXBs&($(cjWd~z0% zvzVNvoFn9vkW)rZ1v#h5 zIY&;_^t5xbSx9mMV?b`P=ph&@2;VPcOG zdz{!4#GWShEV1W_y+o`5u|~ui6MK_bQ=+lv#99z*O{^`k2(k9WIuYwitOv2)#QG5% zKF}HjLQE#6BhVIk8d1z9Kf3Sd7?sVv~qXB{qZDY-00>EhP3Wu_dBn%ZRNY z_7kyRh^-;^JFz@s8;ETpww2fpV!Mg$BX*Ej5wT)o$A}##R!-~`v9rW3kQ>OYMDAte zRwXx)++=cVkz1GCtH`}JO78XK-bn5(tOHL+&_oCz3mb-09@bB6lvi3&{P3-0#T!p4=bET}kfG zVm!1~V>~anl$# zgK?`Fm(REjj4NT>X~vyp{7sC%oAG~V{L74ggYium-;?nJ8UHrpXE1)DsPW%0{wxzJ zF`+UO?qkBEO!yZQIy0dU6Z$bBhY3@dFpUZ8nXsJ+JDC{A#A-}TWa48?e1?h7GO;}q zdoZyV6Gt#Hn~7tYxRi;jnD`453!+RcVd8NnB{S(NCSAj%|1#-iCcVO>ZcOUWqybDC z$E4{@n#rV1Oxn$)y-d2u26sD#!wFl9u1DHC9sZ*IckEsiox{0Z~nYx#0aZIbm zv_z&o#I*k~?MbG+!L)RywPf0-O#6yyW0V-SEdy+t(<8mnV!t_tC)Tb)1PMg zOH6-R)btFdcVv2JrVnNMr%eBh=`)$Wi0R)leGk(QF};`>3Cu`hMlv(*WyU|5@fb7G znDG`fBFq@ZjL(_z1vBO_V=*(9Fk>?__Ap}~GcRLia+H}hnfZ5SKE%vNnAwDxEt%Px znIAH9Br``b^E+m)WacVn9%SY*W|lJRdS>0utUH-ipINUn>rH0$V%8vL4Q5u1SyP!c zomo4Wb&y$w%#J28`$}eC#q8&q-H6$*F}n}52QzyJv!^h7F09MAdCXnU+;Zk#U>@fEn|c3Z-t){G%DhjR_ZjoPXWq}uTg|+~ z%saun3g*{i{$H7Y6Z0PzHUC-WKgax*%+F$ed**-6{ISf>Vg3)y|CRZ_G5-Yf&olob z3vOh=T`c$;3mUNCO%|lEpf3yFVZpmBn8t$nELg~bGc1f}VFC*qu<%V5rbJno&B6&R 
zoW#O17M^3_1r|NdqGwt39E-kSQ4Wi8S#*>|Cs}lwZ=U6w27J?yZ^rP=M828KH#_;J zfNzTU_IAF#mv8@%Z@ckrf4&{Sx8Lz?bS2-eVsTX#*J5!U7N@ef4U5~dcp{5uv3L%P zOIdt|#pn3$5x#qp@1EwnL45ZC-+jn;>-cUn-)&_{O_p54lIvLV8cUk7B#k9svLu%! zcUZcLrR!Ll$M@Cwz7F4C$@i`KzCGV}1kSvHqthgo())Upbe-@)?xSbjgtJFvVb%X_nY zDa%)}{1<+}4^{Y~8b8$Mhu8VxO@8>2A9DF&JU{H`ha>!Oj1{-A;_s~Z2P=B9Vh}3^ zv*I^aY-GhIe!Rs0{P-_^{5L-i;m42p@e_XB!jF6TaX%~XX5|B{e2A6pSlOMGJz2S$ zmHDjPz)#ok)6M*JD?fGQr(XQjho4sR(^`I7$EsSax|UUcVO2X;b!SzyC#$|?)e2Uv zuWc3TIeu>qevwAG6b69<8{$uqSR-fb7`}y@>{Q7TxeV1Q{^XsSl`a8dF;@2&# z`3q}qW6d3`>BX8stQpLjJ*+vznqq!?hTrP*TLXR@!Ef38HkRKC_^pKBj|l{C=9>&$8~H ztb2-e&#>-Y)(vOfr>xt~x&y2$;E(^~kH`4qasG(%$9tE=`~0zrKi2U_9)BF?kF)%7 zo;>8mlUJR*B=YVg?;-LYA@85$JxbpH$a|i=apX-TZwh(S$(u#qT=Eu>_YHa9k@r1$ zKajVQyr0R7{z~3j^8O%iJ$ZkUw}rgz@P5uz_hmt>x{Ex~1l>E=h zA4UFGi_b0lfRGrgX9;HUrhcn@{f~WPW~zK&ys(E_0?FP$ogc~*JS-QtiO)+Z?L{K z>oZuN$@(nTcVm4I)-PrKudM%#_3K!l$NH_T-_C}6qHK7K4UeRIvYP^ z<7hT!v+-*-Mq_N8%EswzJjli}HkPyT6dTX*XFPu<@aJ><*_c1y;LjBPOy$oB{5h9D z=kw*`kkZtd?Z5Z3~*tU~xyV$9@~I~%d{HKIG4 zu``XGU$S!|J14Vq8arpOb0IswVOLFdUC*w+vg>Ac-O8?e*mWwfkXqKg#ZZvHQR5{vW$XvU?o6C$M`m zyQi{y9=jK?=Q8%xW=~!AT*aPi*mE;`Ze>p!_H>oPjB|T&7ODIvynY}*|VQL z1?(we&+#aGPO$eu_CCelXW08c_CC+vM(ll!y`$JWfxVO1JC(iD*}H(fi`ZMt-qY+o z%f3qNyNrER*_X(^WcFRfzH8ZcJ^OBC-!1I>5BoCM*NuJAG3=YjzIp7M&%W>3w}yRd z+4m>=wy{3{C)r<<{g1H!S@w5ee^2%gV*gO~k7WNS_Q%*i zp8b>9Kb8G6*uRkI{%_g8g#F9dzk>a1*uRlv@abO(>@;R_k)Pc<$*u#PS94O$x zAr2hj;N={=hlBs(U~3Mx=U@*GMmac)gCBA53l5Iv;1~|(aBv(4r*m)?2j_Bd0SCX~ z;Cc?8q2O`~{z1Vb6g)}6%TWqmp`Zx`sT8D9(2{~S6m+1VGX>o!=t)5z3ItV&@bg~=4w zqVQS@ucz=v3U8tCb_(yM@ShYuMPUO98&a4;VKWNTDQrby28A6d>_TC83VTu5m%?`_ z{D{J@C>%rKL<*-+c!~HboJHYW3csW9dkTM`a3zI5Q}_pkTPQq8VIhU36rP~)B!y=v zJWo+&imFglouV2P)uiYeif*RpUW)#YqQ@xuH$_iU^bAG+qv#ciUKLgJIz>$=N~Nd` zMI9*WOHqG{-lgb$iaw-hI7K5U%BJXRigGEMK+$B1=1{blqMs>RO;J8Y8!6gM(Kd>9 zQdB_EA&QPrR6$_PIW&bs(>XMYL*H=d zI}Ux%p&vN3l0&;VT$#hQIs6!hpW*N;98TeITMlP(xC@87bGR3W`*JuM2iWgJ7l;TwsucLS~#Rn)ZptywM zGKwoGK27mCjwEp8a*kBvND@b`;K@Ak!Bo8 
z=SVA#WN;*tBkyozI!6|BWG_b!bL2EfD{=G+j@IJnwH&>kqc?K&7LMM|(R(?1KSv+p z=s!97FOI&!(I`hh(KO$A0Hn9>+FtY!k<}a_kcS zd#spa6_mtNl0ZokC09^Vo02OjxtfxjD7lrAJ1DuEl6xq5n3AU{sZU7?C8?COq2w(} zvMA|5NoPv>P|}~0fs_oUD83pMCq-R-a+Zzl-@(>!<0TlX+ugcz5miw zO4BH9NogBO-=efLrQIm)NogNS`&0TZr6VZKrgRFW(_b^fyY^ zQJPQbMoLRKekI3m;rL4&Z_M$Q9Pb$AcxR6H;dpDRyu^tHoM^;}#+>NHiBX)G$cf)Mv56D=I8n@r)0{Xf zs=N~Amr-7o@I&xC~rx58_GLS z-kI`ll=q~(59Na?|Ag{v%BN61jq>@FFN#vWnDV8RFQiqT*I6?x5m6DjuWazf?4$;x#IoQISqXD=MNH zRAf@og^KP}^rE8gB?A@jQSmtyUsExiikVa_qGB->OQ~2+#gA0{M#VZR@~PNJ#bzq@ zP;rQglT@7MWE>|GIC(iIt8p@klUEWwc{L}mvt>)AkPW{fQKRC68Q`Oq7Uj%F&g|sOdCpei>`k0~gtPzU z?Eg6X0%z-U_7%>)%Gr*b?Zw$XobAuqft>x6v!8Qz6lcHU>=@4dm2-D;?jg=S!nwyd z_ixTU$+>4ZSD$my2ApfexyGE!;9Mr>+HtNU=Q?w47U#a<+)teQnRCB#ZY}5j;M{u7 z9pl_7&Yj`hdCuef6`Zfl`71epHRrG8{AAA0;r#cUU(WeF&Tk-keiP@na()NrcXNIp z=MQqei1TMSf1V4t5YL6mT&Ti@>RhP7g_>Nro(q5F!p&T`l?y3cXv>96F0|u92QG}~ z!W=HlJdv zDcB^~D%c^|EjS{CAcWdNxKapL3E@T|{9OqDCxm|s;eSF%7eZSh3>3l_LKrWEi9(nn zgy}+MaFd^=9A?^zyZj=x=L5Q0q#7!0AW(aY!g}8Y_+!7(~dm-)zA#SA* zw?&BCCd6$Q;&usfdxf|ILfmm7u3U&aDa2P2;wuaBRYXI4bs@fn5MNV>zeb3^PKduj zh`&jQe^`kBrx5?B5dXLk|AY|#v=IM_5Z_XWZzIINCB#RB_^v{HUm-p!#19hUKM>-- z6ykG)_(?+id?9|J5dW%Cd99} zUP!o6NVr8vxLruNM@YC&NO(XZ17Ap4@Di0DWe=Jn~ zM5sJUsQi^sd8|-5CRCm*RGubOo+(tGBUD~2R9-4n{#mHJL8$zvPfBes1g;b3>2z-BvkoCsPdUmTsdz=R(!VLe;54)%ilzr9#zZLe&*Q)s;fkJfZ4(q3T|t>H(o@p-}a(Q1z%# zwN$8jLa2IDs8(61Rz;{*U8q(=s8&;`R!68-PpEc{P%V19Q0-2k+TVm~_XyS62-Ug> z)w&7QdJ5Hg3)Pkj)z%8t)(O?}g=!mw>d8X&>xAmp3)OEFs^2VBZzWXkBvkJrRPQcS z?(@kb%?Hz9GOXh_^EB<>dyi-g2tA@P`yctS`#DI}f|lH!G=>O#_0Lee!t z(hWk=O+wPGLegDA(%*%odxfNjgrxrnNzVyMjfJE)grrmCM10!B#jo5z7~>lg`^2W(o7*~k&v`pNLnEzZ4#2U3Q0SJq}@W& zJ|XF#kW?fj6$?qngrws_Qn^qgPN-2ysBxK4ql!?Yrck4{P~!!m#v4M7CPIy9s!*f3 zP@}C-BU7jm6Kc#9YRndD%oA!X5Ndoc)L1Sg#|gYQ7}YY#`KZ zDAasSsM%hqIZ&wip-^+FQ1cI==0%}aO`+BeLajT5S`P}f9u{gnD%5&hsP%+U>uI6Z z8$zvgp;k+wR$HM~M5xtXsMSZP)laB3C@R!?N2v9lQ0qgX)^MTL2%%O?s5M`xwLqx# zy-;h7P;0GFD_^L!QK+?9sI^0=wOgpQPpDNa)H*HHIxEz!F4V3e)UGMit|Qd0C)B=1 
zsQnkA_FskCHw(2N5Nbax)Q&zX)P7v3{e)2aX`%MBLha{;+Aj&U+X=OY3$@1!wO0zY z_X%|_7wX(4)Ok**(^#nUhEOL%sFNwwX(!a_DAeg9)afqN=_SBg}Q@T=`Go%I3nA1BEN630JNWt~@T(t0dG*6zW|q)VofocdJnEUZLKDLcJ%1 zdd~~>UJ~jx7V2dR^*Ral-Vy2z7wXLu>Ma-Qtq|&M6zXjj>TMJ1?G)leBp8b-BMA3%Cyk9aJj1qHdNG2u~ypE0LQ3lf#FJy*6 z=A<1{Y%RV+=0YCBtAQ2D31jgTaj$+>F6|41R;b?HJsN!D0*^#NZFu4_09C1O_WH zcm{*#F<67aS`5}<@FoU-!(a>6!TO$9ACC0^>kX_ov7Tc67_3jm`gE+%!uouyFT?ud zSbq-dFJS#8tgpiQI;`*VZKrQzBsK=IF$NolU}G#cPQ}J-Y+Q$px!9P8jrrJEfQ^ON zScHuSu=nhR0!e zB8I16_-za)V|Y4--^cJg4CiCG1j7{=zJTG27_P!_4TftmT!-OC3_rziJBB+j+=@j1I@BKOUnaFgh8d2^f73qbV3o#b_EvXJRxRqjNF30HawL zU5U}v7+s6e4H(^s(JwLj6-J9NdLE-UG1`L71F$(3n?J;6ADgqWc^x+AVDlH)Z_dT$ zJZ#>D&4;l0Cu}aq<_c^+fz6fJdkJP+fA7%#^79*pn9_;WFaO? zFu5C(WtcpI$)lJ&hRF&{Ucuz=nDpPpWCJE2V6q95&6sS%WCx~uVmbuVp_mTCw2ohbQ-2-V|osz=VLk((`zuj8T;uxOmD^X zHcaopbP1++W4a8}$1!~l)3unshUxp5eu(MEnEngXPcZ!y(;e6nTYF&Z0Bj9ltB$P@ zTTN`G*lJ@d$5szpH3M5e#nxhMJ%X*Lv9%goFJtRfY`u=H_1Jm~v!R#` zU>0B&Visf8!mNW?53@rsI~23=m>r4PF_;~X*#yicV)iY}zJu8nnB9We1AWYvWA+$k zk7M=}W~(q;joI6ny^Glf%s#+u6K0z++k)9vY!AWqP;3vw_Hb-}0ox<79bh}ec8Tqy zuss3WXJGqsY~PCQrPyAH?RD7he~9fJ*cpzULF|me&S>n6!OmFh9EP2`3rV7V&~u3`44usV|Op??v34jusZ^~E$nyO z*v+xq!|s={I|jRFVfRAp&c^N>>@LLaGVH#F-SwCc#e4ws0P_~+9n1^NE6fkX{1D6! 
z!+ad(<1s%H^OG>2jQLc|&&T{0%$LwNzYFtwF~1M`8LdVVzC<*`(m*l7W-o{0*e7G>R1e7aWEEN#o~A@reHAxi*vEK0E>&TxVVqS zPq3JW#X>9=V{sQ2_hRuoEbhnRX)IP_@iG>#V(~f_>#=wXi}$hEh{a|swqUUp%e}B1 zg5|zg?uX_6SPo!W$1=jQ!tx+2kHE5jB$mfvc_Nl4V>uDaNm!nWeONBX@-Zx*#PVq@pT+V;ELUN<2K(iD zEZ@U&3znZ?xgC38Z+Gks!QN2p4Z~iHy#jlOV(%2}O~>9W>|KeypJ8t{_O8d?BJ4ej zy_d1~2KF{!Z!`9`U~en-wqb7vR{LW$g1%J^s|Hq0tP-qBtSYQVV>Je=u~>ZttD~?w z7ONAmnuygTtWL#h3RY9GnugVRSY3wIOsuZJ>ME>$i`DH|-GS92td?SRKUNQ7_0Tu4 zdKjxGvHB3Jf8t<-gIydv2?xK4gBRi8)i`)P4&L;?X9M=w?K6&C@EQLf2j~75Ngv{2 delta 37910 zcmZ^p2Y3`!7lz;2%q(ShGqcl@klx8|mUKu+D4}-(p(X(g5Q@^I*bsNGy|+ylHYOesgol}e=wsZy$z`bxFZAZe&HLK^Kz(kjWnEiCz5lIC0zS1aAPZD(9} zSGemPUAKCcdrtP8=2_*5c+T>i>$$*lvF9?+m7c3T*LrU7-0W%dtoLm6Z1&vY+3MNm zxzF=}=YO6@J&${y@;vK#!Sk|bm*;iQTb_43dp!p{?|VM-eCqkabI5bV^PT5M&o7?e zy^>e!aZ;ChFo8`^*cK3#Z-X7i(Z@IUZx3{;Cx4(CwcZhemca(Rm zcf5C^cZ#>sJKfvto$a0LJ>I*}yTrT9dy@B5@9EyK_e}3O-t)Z|c`x-|;l0Y+>b>53 zlXs1Gop*!xR`2cJE#AAm_j>R5KIDDG` z%2(s-=NsS~>>K7A=^Nu4=bPZ0>}&8%^Bw1#<(uQ1?_1zo>|5$v;XB2*(szbW_|Ep7 z=ey8%iSKfsB0R z`26@_e2@6j_=@%~G?~ z95q+XQ}fmCYJpmz_EdYRm1>pRTdh`W)M4syb%Z)novcn#r>YI=9CfZbPo1wGub!YT zP#3DFs;8+d)zj5g>KSTSy-2-Sy+plKy+WFcA+J)LRTB~-gcAa*Mwn5vdZPV`2 z?$z$o9@n1Gp46Vwc4@C_uW7Gq2egCQd)oWjA?>jCwRS}NUHik2U-J9>@qX2>`BVI< z{xpBOKj<&?hx|SKz5Uhx8h;=EaQ_JZNdGAR6#rCzgTK)~*FVoc-@n3tQrLg8|1AG` z{tNw=`mgX`?Z4K4qkpyk7XJqSX8%_IUH-fM+x-vwAMro#f5!i;|2h9o|1192{k#3| z`1kk^`akr4=KtLPh5uI_UD6fZt#{Qux~glssatxI-c8TYGxc0Oq?hVF^=iFVAD|D> zN9kkriTV_Mx;|46&(jy`C+aKom3mk|M?YV`RKHTcR=+`S)7R?j^xO2?^*i)C^)31X z`h)sI`v3GN^q2IP^_}`F`Y!!d{WX2BzE9t;AJ9M3Ki9v|59xmx7?R;K;tbj7Y z;WOe5-N-WnM$jlSij8uk!l*K8jQ++zV`$hIW{fgM8xxI5#$;oPG1X``W*W1M6O0AM zLSvC}l5w(eiV-n{ai(#Wakg=>afxxMagEVxTx(osv>9uSb;efX9%H-lknxD|xbc+n zobjUZim}Uh!+6tp$Jk>WG7cMG8%K<9jBkzajPH#fOpoa`eP+C=nwlB*o4RS3`DS;s zzzmo{v(OBgJsIS_Ym4=m^|$#d_6x%X-`T$~t5nw!XHG zSl?LRTHjgUTR&JoTEAF-*e+YKJKG-HvTZxTPPB9ETszOsx4YXt>>|6&F1LHzeeF7X zoIT#Iw4JyHr1VI2o0Jh5AN@Rsxl*>XdRj_9jbroqk?PG-cd0-MNI|Jk3Q0YrBB@v^iA;=4 
zicF47iA;?&L>ePak!g|X$2iKJ+N3P0m$Q0WwoB^m6feznNqwBHNhMN$Cv|$BOB(1L zm!0F1hB(FDhDpPny1EfAX_WKWfOO}$DczjkyJ^x`M@>%OHaB^)J5kbHapaNba!b)* zZj1$U)1Sd?uhJsbYmUlw8IkHuF4JYXY*&IS(Us)t=1Pv-99bP%6KRXAjjW5TkKA%p zrYkL`%$4bEO)7S|a$?e4`Ob0Kxl+J+aAldx6>>^OraS#>EvI<9<#H7};`;0^$IQzd zH92)#W6q?wMDko$$0S}WX_5NKb1{j}N9vqM6SJIe=RUA)efN{g7fPDDa}?$FL<14h zx+qLY>mzkX)w|V>NZfj)db8Vbn{LZ(yA#}r?j(0Ncd|Q0NVf=SgOD}~X_Js{71CxQ z-6o{lg>;9I?mQ~ao$k~oWxCv1&gyA7Qf@3zclTJJ?x2%8Jzwe(TLO29Q`~K+%UvE@ z19z{#tbyus_l`xlY{rSMA(H04DyF@)UHg;m+Mnvse#2k1-yGGx+P%iz=3eVw=U(r= z#l6A3(Y?vZoSEzu(#~TlZ);b1XH4Z??JDnSSGhf=^1*hM54Wq_(XR4|zpH#Yrm|-C z>2cj8tyA}?O7c1tMl<|PY$57}^zG5gKf!tU<_xDaF=yL`c^k^zqeh$~C%)y3ziCG| z^19v}HSF3JT|(j79ShL4r(+R~uD3<1+q&K^T<;v!-*t;qyfmw;hsV0!9jV@;J+>`n z^>QiRv(U51v)Hr5bE0RdXW6!E*7R|yBJKtecjHkz&q>au3xY1ssj=`qr^hymC+v({ zS1g_B{8E_j@|@#LTbS$eobOCqnBd&7Fy7_4$hmD{iOX}Tvu|O*<+;My70P#cu5!fs za;epsw`i2hbG>ulq6(MiCg;mVeO;b4PH1sqC(pV_(hM!vIe1HwbLwKl<=Nm|x;W(W z-0D2MxYFgh-T7g0vCFf?DOi&2^4#r=S<>C*xz`!9Bqe_AwfsNh%&he5{yeTNX z;>0h_aCu&fEw<-PXY$e#muI(gVf2r8orjm^x;*=172tW#>06fL@_gvjEX$WX&i=Ag zXXmY6>2v4OW!+t#ubcyA`7Y1b&cS5?m*-o@w>;bB`N4^7E|q?EE-4SXJij>yHW#?O zI1eu`b9v*O{a(M$z^2q_0H{>ktmF@Bt zI}1+Eb$QF2gLm|hdOF&jc~X_*uMD`nHBO((5|_81v!t@nE-ec zbKW^6#F7IMTRF}KFOP$MB7P!1C zV%^PqigRdXiOajvS#o!gbcVC$?qW$eyY4QO&UUt*KFsAk&)Isq>BO%pcX=;#7W64| zc`tF=`UG9x%bm79rla;%oJUvbF0bQc-jgd`<7D>Dc6qOJ(#|M$d2e*;&Iq`?tDV!& zC~tba`)aww#gf{CbAZ<=y1eosr~>>gRWPZ*yk!3%R^^I^oOOd+qh%B$xLt zr|$k@$Jal{<-NzLt81U>7qMO5?arF*h7N?J?BIQ^mBP%bRHWp%;nwblsqs>desrvyxoi zx1HKc+Gl?{E7j%Q%^6(m$FToM#6*xOD=;YLl zFkQZGS6&lE$2;K>=`LTYvt~rC%a_qUXgP;2Omz9OosyBmUA{c0WMs(M`BG;T)t9g%cwanUxjn%;yRbF(y6^2|5t<*Rk-#x%HmgPcPbmpc2#q`7=Uoy&I?ICU?0oUWIp zyL=;@0~e(`uS_tUX_sZVe50N4WuslbI_Krfrnr3dv1f#Dl5_Z)Qs?EmZZ6+cN19mR z^q-(QuZ-*L)IHNpYHIg?bY$8UewS~CbJv7Y=kPPB(oAQ^6*FAE7ANz{u`b^{r*=xa z>zBt{F5d~x!&mlo`4;^dK#`N~@}20^ISnq~a%atmc3t}%)#W?+sF&_8-)YX}&kvDS zId@GM0IZ=t7p1=7hHMGD3|ZzKLd;FXk 
z-c5D+HpYrIW1{60T$kYTZFVl-IYGL^S+l)e{NZQZ&d%#nUB0c(SJ#c`n)e>K1W<%=#=d;-5*41wrF|I=v||nb-R}OwrF|24}Bl`KK6a$`_##QbwF6e zeJ+}`86nF;_6pe$a)OYPg`6(rY$4|hIVj{ZAy*4|ppb_Od8Cl*h1@9QnL<8Z z$cu!$RLCoYe5#OF30VmF93fvIMo)ehLu0myTT_Ln%TUN;g8+UGWP%!S(X8ddZ9>j_42J79AM zpgd+=^oEhxt9|bFj=AcnahWsj^-|~JH#|n)cEFt-fC0{z`%9cXZ&-qx_>cJSRD;JQD-&ROsJjrw-LJsp6_&Rx65CpNT?-`6od{g}i;=k0g> ziOubR`#S(FQNVzhjC;M3KPi4*JLJI*$bu+jkaO_38t1LIbtnDpge!LsbIy7@!8x)g z)H>ZG`JGSRPKxY!Lw6@SOWqmh+`K!%k<4^wiSBng?@4yz-bs*ZorA{} zIlK3!I8)zAa3=2U;mq5kI*+}RAjO0@&+XBj>^)KXe$L5z60Y3a*V(ft3hd*Qyc_fC zRPEE9tKLm;X6~zXK7BVqnjBmI@Al<8efK6fW&8U(SME)a2FFGZ@9ylpwBPG|yEoDK zdVj4mVqc;&POo;(Ind2nyDw218$%sAkd_qxZF}(Vbp-!oH26Nz0%<*@N}4p!nZG|% z8XYq^=YZDwM>N>o`x9FqSEb=j>4C(f(Wb~Nmd{_{Ty-GZnYcIPq`#{;pTFlRN>EcG zHS5(xHA(HJCX2W)MBJAm?kf>@XuX=MrV&yzMBHH!_jT+=@*5}q(8|em&-Y)xZ&B0A zlG4hag&8G1%Zpcap1QJUd3i-)M)9g>*>uH&tp?TND0iV6QhTUHBJMj8_q~YwLB##I zUM*2e3902G?k5rVbBy~JC;rfB&WRsn74=mI{-@m_bub}ysEGSb#Qh$#`@?zsgJ5{H zI{rW7s8=TtQYQ&n60$35BgbXe-dA_ml`WTC)U>jsvZ}N&qoks$a@Ew8WhG@Le-ENb zZH^L8Q>Uvl)Z>J#2-z*)Ka*(c<9A*({x)~hSjlVZ`!ej)3BM!#~{uxTTPe&(IBvbZp#qF2RVQYqAP|C8)I z^?X9=g+exkY{f*_&dQH-i!N8M{!iuCsI7$5>x7&rC96Y7(M z)Tf1Kj7tF64rkQ6Rd=Q#U-bze_BkWu?7V zO<7r1Qu+7w-J>3ea=xqXRrjg;gNio#L+WAmYxRhb%Y|Ga@(e*T(Dy#O&g~$PJrX(tky-b<>gwX{kaU zB;>&{yCKo&pIJZRv)xTAODcNyD$FP;uPRx4Wm#ydV1=?7m1iUw;VqIN^Hq1rHQxR56Zd7_Xf33>8* zZKO7ekTyohQ-nMDr9;Eq9!d zo1=C|8_w7xsm0py+Ty6;3EBc}p|(iKvxGcb$Sp#ivtC=Gok&PqCgiz7o)>dC--$nT zQ_(8z%xKpBZL4T!X=fAC&K2?rLS7KFS?IiTD6i;Z?TV<)zcmx>O3fjpT`lCrLS7QH zIngOPToArNTl1gG-lnZ>4{Di^m$wHMUG_)UymDsurj=!t<LBhOUg>h3o|Op{=$2|_V9no zd_;SckoK66PZRRW81Lz^2E4WNXGa=WmR6J(W|Wjv^o$ah^eivTC@b$E4r|Y7Fa9U< zOWMnXv{!_DhLFQC=17$J!5hr`s~T69R8>VK_bgpCWo2ba+24!vO>Iw<_FvcUU2QKR zZNHGu6rJU>V!UTNBfbd~YaeQ#MXmmIuY9h3K}h>b$ma_AyqMAX&ibg)x7ttt>FQ_g z7ed-^LcUPQ7sZS&c7Fe+AROoK{9lgzUHn}M`MpBERLGabY%Y)P$?LnUJ$y;i%F?ou z=x(p9D39*;lH$KVmHoOu;lITA6a7hq{K-PTvWt+N81q$5?sxe`nf|>0G|KmPC*%(Z z`5Ga&#%!*2R(=-@7yB#zGlHJ}UWEKrLcU(eH^gji%&xs}*tGf~>mQ7D>(Z*qRZ~`$ 
zR`&YaqWSyz2mPmx!TuqH{KJHNvyfMdIQfW>*Ej{==ZDAm>;F^81ph=r{>egKE97-C zA?ss8CM8aQxae>5uZz^Q`PT~h-ZuYwA>Y^PcS+ev{!Nh?ZT?$@y!~}P zqYGd7Z}Z>ozr%m0)9vTfE)NL#K_TBCsXr=yaF8%MVlKmHV5BML7x_eN_|7-LAPsk6)+!gqDMBP0mkE@{}g#b-VyWmXr#U) z1uJy_3;vg)OfL%g@izaeAel~hP?BOH-`%$_Ng#26w-ADdA{GUYWUWm~>A4|j4GdgnmmH)d)%?AG= z|6%{v{v-Zx{ND=sB_Zz=@-892CgeAS{MH8l_x>OJKl*?2|Lp%o$h(DnK*%2o`C}n} zDdev^isAS6MSR)0;t#`l?)M&@D(=oQPGXD(Z6}JG@h)V+M)3@A%Ar&lvPoQ zX9)SwKP86G)PuSn(|9bz*Gln(w2^@}_BFA?&QWBiv#`L7W2H~-|nS`X?8 zG5T*~^l$%_{(2#&?bF;H{5R>Vqx?4u`TI6~jgWt6T`o(RDf%t?rbx|NeS^MH$Uh1B z=e7E+`eq^jBII9N?~dcj%Alj|;^mlrBQ?3B})`<0<{QDBIKeGy1bai4%$}6lICXvYoAS> zzi4^O(mBnuTG~tEF++<~x1mPj{>-Xj8mW<*O@?LIMuPv4kz{l;l8p%|LP--!hETGi zT~f&rN}f=<3nj40NHfxn3?tLXGO~>vBUdPeLMahSxlk&FQZ1DJLK!HOp-~U9dqJan zthXuYtvkKasQC7=Q2SVBbgZ9qc()#Jl(r9LM~GghwqN_%Nk;qKo|60L^U{dj?fv^T z<waYlyxZodv3yq6}QY(}JM=35hoG9XosEk1|8CMxs3uUlShQwrqjT?-c zqlmx6y4qMHlwm>{p56cK%VXuA-T%BLvA-Ovs9Pe{n~e>|M&BF8t;S~GW4`B%JB&Mx zEkYS7l+hyYC83NJ$~X~si-@~dDD^^_D3r+^)p2+GB8-Zt$L^tx`=Upt|5)G$+Tmk5 z7Wm;m7ueX*K3Uf>`J^*_-yqX?rhQ_3$HWU!E2nCoKi+t`eR4vy431qHcl8*rwGU5< z;QV0>b$0p{kMVZ<*pvuP|DP|O*|CfE8XrY>(LQ6palkleyl1>`d|-Silm?+R3Z+RX z(}XfzC^Lj|oKTuK86O*;7@rbCHTIE1NGLOfyN_`96>&!*aYAW`Xd>>&46SeL<+_yA zy2&r)w!WfCkAxG44;nmq{-W87PoB^`b3pUb;man?Up9Z{f|d>`Kbfwml%I`Xj9-o4 zjNgqvOiW2AvxG8RC@n&nBb2#9nJ1L_LOFhu8AlFzrrYdfb|%N{DwGo<(Qo*b=Y;Z# zaF;|h;jWoBe9+**v*#}_UA(k*X-o4AO&dOFaQ(c+Cy!}9Y5v^i<%^fj(Cpe}%jPdz z-n?je6uEfmvQf>aE?%*`^%7k=C)_xE(BKKpGeo?tF$6_RAftu+_6UY0MV%I0EoN$b{psZcggWO4Lw!Tx_s7PFD@CvAn5~h@b>>y()#f!qxkMCBivCN$HMfQZ<~7~mH&S0 zZ0r{b z?i9+FTg;!#pUqz+&HPO$TZM9$P_{*nY4^_1)b^e>q&2rd8tS(ct5c+=&2kIn?&!~= z>RMM7NO}D&uN5C1_6g;lHu9n`w*G;O7cE=7pk>JX15cqz)H8WB9&{c3@cM84+!PKwN|#3Bb0}P@;_&6 zYG!!aq~@jbqpu{(Ce3d-IlKR9{bP^o4uYW7;~xY?RDM`cEPe0Kknw*Jl1 zwkoWik(xCfZ(mlGRox-IMktTJN;vkjNvmH}eSfP~C{GCG=?>?EtfBvKKFk^}lqZGq z)YPNTn8i!8`>$N{H`iWkj5Y2bcH^yjp*$m$XJ=^Y@Iix*HT7`2_C?42jlj01SPdOg z8-?=x(esNnJ*squb(~OM5K8oQx&7d0&9dhF!{c0Qo={#C%1d?G{a5}os|&3qF?hIR 
zUFKR#WB8rf{TFBaL(WN7I8w98I@vnKI@LPOT4|kbt+LJ#$}XY2DwNlR^14vo5XzfE zc}pm7Z?Ynmu+Fs3vd*^7vCg&56UuI(yf2i)Lit*_UBc}aZohB`W@y{Tno^uJ(z?>R zK2o#Ma;&SYtF3FSR_j{pI-$HHls!UuS15ahvQH@cg>qmcAwuMk!wc3Lt1Wibb|j)j zv`9~(92ClXtv6V}(5lP6!e${F>-X)Yn?fBQM*IQ3aluTuv^@jCk>*7Rd zO!GU|2a%dvtv%Me)?RC$wck2m9kkxF-WSRdp?o8hZ-w%mP`($+4?_7-C_f41=UWL` zA6Xw;pAfP>vpy$eeHl%y@{3S@6-w;;TICPnCOYVrTGNuGUEz|dRngvz$NI_oSt!4) zX;<{C^;^@Q7Y+3-%hy@ITEDY+;+XpSmgPtBw$zS~?LJ#RmVN6dyVIW&)=zfV=!au9 zZreWFAE{Yq$J?r{3AY_5+_G>h>&%~`sTa!DSk7DDOpzJ~+THB*NX>dX*-o)j?KI); zB;1{ayNhslU2kU)va{@L;r7JBi5G5lhGq^QG5<*w?`BZo(b=o;JEb-KP&9G`RJqG-+RYc}d5k#A`R$O;JIO!kye^PZREx znC7tEY|o0+wAnL-JGIT8E!=5;eaIgDRe(LuK0bOTnI@D~ZT1Pmo!$}6B6~@6YNAk1 zYqL)j?u@!)b6$IeeR7A-Q-nKqNgcZ{v{%}zqHYJa`#nRrvyYkCwy>jLoSW0^6YaC@ zbA+-=xHE)1N4Rs>+b72Uy)ZVC5&M5$>vj>GK>|0|Q z?;+g9Gql$1OzHHrE%sd6%$yRvn8 zmXw+Nj=lFEfPMCU;qEQm)va5zq?ECLJHOA+5>~DZMSm*ND|$F7DXlCn%;?p#X`bGEvL53u3b87-u#nVmhQ4Yvp;XWEF|THy^n$S8SuUi-aX)b zAG}|I_b_c_I{|zP!M7NECxUMo_*Q`L zWbmB|zLnrx1-`4mcMbTiC5-QS@ZAKyHQ-wZz762J6@0gYZwvVD2H$hw`v~HJcpt>) zK>TEgpAYdDK>T`$e+1&+fcQ5d{(Xr55aPdr_`?u?1meGi`0pY94^SmgWl%eXLG23a zAW#>AdI6~0K-~fAKmZG1?p~4_kg+=)cv3y1oeGTzXkPsP=5sVXHb6y^$*Zo zpedkr2F(I36SM-*f}n*!D*`QC3EBYA27z`JXg7hj3ADRFdjhoQL3<6fcR~9Qw9i31 z4BGeLj|0B~{!ZZU0)8F*Civ%qe>wP10{I0An9vjL*UN5{w_g_!*2}!T23az;uD>15*Xl52gX81!e-6 zL&01MW*eBVfCX47U=@HB1gi&F#bA|zRRPv;u*QQm0j#NDHG(xQ4A${rEdc98u$FsGMt2J2CvGgeM{4X-Ie$5}t>I7a`$mNc2HsIV3hfVgwS` zK;l|RybTiXfW)nkcsC?I1c?tr;#-7C+zpBELE;CH_!A`l0*Svt;vbMCK~fwfDUj3& zlDa^W2alKZr4M%8=>1C==K40`v{UHNbU;BDkLXDawR19gX9^I zyaJNXgygd!`5Z_-7m^)Fz8aEmhU7JnycUwzL-Gbl-U7+tyC8WRB;O0k+adX5NYNps z7E%^N$_0>e4WwKPDK|jMO^~t%Qr1GseUS1fq&x;GPeaPHkn%jFyag${A!Q$=9DtPf zASKHCDWrT3DL+A~7g8+ zsWTyUHl)sh)OnEl5Trf{skHh33u*l!Z2+VVg0!)aHV)G2A#Eb0O@_3okahv2-2-XwL%I&> z10a1mq@Muk3n6_8q%Vc^6_9>1q+bN-S3&wUVMxCL(r<$F)sTKWq~8hY+aUd3NZ$_W z|AX{LAbkgGIl}6Ymo5{Wc&e{ z$&i@|nE}Wwgv=tyEP>3vkl7zHM?>aV$Q%cm^^iFcGABdkRLGnI;mmoEc|2q;fXqdZ zxdbwoLS`#uZiCEsAPdOKgRCKtH5RhQLDqQ4YJ{w5khKW1mO$20$XX6rCqdRJkaZel 
zoeo)NK-PJXbpd4E0$F!L)>g=h?*FUs1rw`=xgPZ}7vlMb}f}9s2=NHHgLhew=odCI$Aa^R{HbU-n$UP2nmqYF<$PGj8 z*^qlKgK*vxkoN-Q9frIgA>V}jRLD<<{4B`Nf&3E4FN6I4kUs$O2SNT2 z$R7szBOreiH^PUIpC;5T^Se=sp;_ zkAd!WQ1BKM?1q9pP_P#Y_CvuzD0m+VK7@jgq2N;}_#6tpgn~m*@HG^C0|nnf!4FXI z6BPUc1;0VT9}tir5C;JT0-Yex1p*!j_`(oSA>fCA0RamF2@ptvKr#eUA&?G%ObBE{ zAQuAp5Ga5^5CS0x6hWW_0%Z`WfIu$@R6(E`0(~IR4+6Ci7zBZ#5Euc0(GaMEK)4

2fVyuA|EoQC;TJWj#KDABVFI4TN>S3(ycY!X45T7w;0`Cq}%IsdoxP+YIIMb zdp){Oqx*ciFQogkbT6g*D_oPpHECRv!8LQZrjTovaLuD!^90vCNsmkDQI;O%=rMpE z!{{-B9{15>Gd+sw@i9HVqQ}?tY)j8*H+o(}&q8{xpyx_@K2OhA>G?Xns?e(zy=v2I zB)!JdE0y?&tA-?+9Z*S6!@4qQ8fYZr3uVy=ChYoFoTbM!7l@2lut zo@nnO^d3d;G4w8`_bz(xq4y{B{+iz3a$Q%hyN>Jna@}2AcR$xXz;z#R-4|TT2 zjp)~$el6)YgMJI?x0rs9)9)Gjoul9HTz@&&U&-}@xqc+qkLLRAT>lW)ALNGe+z{c0 z>fDgU4YzPZJ~teWa>EI3c%1$r`d6TTCHnWK|3LZ=qW>29@1_5K`u{-x-xz=aeHk#A z0XH#VGXr)oU>5_vV!)3K_=$nN890!EgBZA$fg2gPnSmcL@Cyb;zvRYF+}Mj7dvoI& zZd}ie8@Ta3Zv2cJzhF=|2K8Z3KL*{+pbZS#$e=$k=uZs#l)<$a+Jb{}ha`O&uKETa~MBV%? zH~*cRe`Q!ZhIMCH4~8ve*qsbp&9LVf_6oyZV|Z1DCo#MZ!?PKl$M6D%Kf>@+41a&5zjN?MMk{Lh*Czp!id)y@c|<~ zX2hqA_<|8%G2$CClgaEvW*;*9k=dWjfn*LQb10c($jl~l0-3jvSwQAIG8d3pNahkU zqsz!#LFP&_SCd&p=2kMdleve?ePljF<`FUF{%cmQW=%TsK$(H%BbdyYRRb9jOxOuZj8E# zQNu)y%4F1NMrAW9hf%qV%45_dMoneZ3`Q+v)M7@hW7LC;iZkjsqs}twX-0j>s6R96 zD@IpkbP}WMFuD_?doj8fL%reH@&6u@}+02+?#*{E-D`U1ZW*1|QFy<&@9%IZ&#++u%lZ^SE ztO{h+A}d;(tQ4~9k=2l_tI29XRx7gFlhui=u4G+LR)4YvlXVkWHYV`QBm>lD$fGi1F&*6U=wN!B}Ly-RjuvfGf| zgX~^pUr%;_vImkqnCuZ`k0LvZ>~Un@PWEcDcawdX?1#yIl?+2tVeB5p9*Hvc5yt+Jv7a#ZGsYz{t~}!^ zGA^BQtr*vaaWfgWh;dQIoo3uQ#ywBYmE=?*CrnN=a@v#Ak(^HC^dYAoIoFeO134qe z8A(nqIeFwnCy_IioEhZICMQPDGIDMs=MHjKk#iS0r^)#<<7+U!598-C{yxT+Fn%lJ zcQAf8a!e%CX z&4i!GO(gd+a?6ujQB-afaszUc$W11<9=Q$4ZA|VUawn5Jm)sb+OUYeM?h10(k-MJU zZRGAGcMrMy$vsH!5ps``dzRd%$vsEz^W?rr?s;-Akoz4I%SV~mnu(cAj4^RL6CYyY zK_(tz;&CRPB<~vXdXaY>dHu-iPu`8>4I%Gl@@g`LVgYMlgLjdzaIGw$-kQX4Dy?j--7(s zoe}??0$bXjn=gEJG{8I8?CI1cb-zNW$N&eU5e@Ffg z*(ynLLKc6PcXHdvYI*_STm^zoK^O<@-Q%jh-m8oZ#`aDx#Wa>{$OJG_Vrlm5i8Pi%Y z?Ixy;W?B~0Rxs@zrbX{%+Cip0#L(F)T8ILjJO=f(+jE|UkDKpD4vjQ`(W@Za!wqoW;W{ziOE;Cm!^B!j2%giIp zJju*c%>100-!b#A%({|URhShPHLEAHZeZ2`W=&((d}b|V)@EkyVAd{XonhAV%zBYo zKQilgW+yPaI%4V9sRb+`*i+%(;&_$Cz`5IcJ&kK65^2&Yzik6?4PP zjWD+>bFX7=U*=9??ri4HW$t?B#+kc?xzSSQzRlb}Fc0&tU|w0~r8BP;^V%?PF!M$- zZ#47PFmFBcHZbor^Ug8vdFK7f{6ywo#{9<2Z^Qg{%+F+g4)Z54e+~23Gk*i~&oci- zqVr#7{uj*up7}qppcV@nuppHM16eSf1(_^Z&Vn^8xSItBS@0MOPO#t;7JSWuZ&_HK 
zg>_k2pM`x`IEaNqSU8`BOIdg;3wN;a01FSX@LN#}|IWf+S=5n5Jy~=ui>9(@9*Y*R zXcLRJvuG!ao@3D~EP9Pa->~Rs7X8BFFpF!mIGM%0Sv-)%gIGL+#S2-yn8n*!{1A%| zviMaN|B=P-MTuTQv@FqbM4J$8OSC=FQA8&Yok(;q(T9m1rSK98%Tid5!u}NAOyO_} zw^6vC!UGh3L*dU9{z9w|u|dR!5Gy9Oi`X7wKeOaw@FkbBWB^Nsv19~G?qbRPEO~$> zZ?ohhmi&pOsVr^A(iSY8#M0R;oy*b_EPaZl&#BK3Ls&kP?MS4%B-l#iX>JHXT?}nx z+;NCIj&sLJR$}E9tSrmQeoJ! z=L+upggd|H&Tm=Oj#b@R)q_X~*0JhcR(;B<&sklE)s0x4#_9#EUdHO^a#p{{ z>epHQCTr@lCY?2nSu>l9Z7XYHta*wxFR|u4ch%vpM%xo!(C^& zyDE1lad#c=&gJf@+&!JUk8<~E?mojk)rj7c!aeo4=N9gn&OI}^=Lq+lwjSV-?+a$_g};P zJ-L51_ZM;h{oMa2?*9w-f5V36Z0Nv-PHdRVh8P=`vSBM54zl4e8;-EyVK$s$!&x@G z%!UhWc#REjvf&*zMBin@=WO_r4PUe2J2w2lhM#z#5f2RJfh9a}kOw~Cfq$^^N;Y1_ z#tLk#%*HSqtFbYIjjh?(mW`d**p-dfuyFtzZ)D?8HV$XwNH%7(F^A~JTsF>PGu<=i9{FIGfu<G*RHkDyhh)tKX zsS=xNvniQPsccGPQ)4zYW7EZUfK46P)R|4)*wmX%{n<2wO>5Y6KbuO}w3SUe*tDBX zN7;0YO;5Aw9GhNX)5~nSz^2#O^d_7B#HP>K^k+7G#ip;>^evmdXLEHnw`TK5HZO^? zxrEKnu=zZjFHn3l#Um&lMR69z<0zg$@hub=P&|d==@ieRcrL{YC|*o)jN)Y!-$wBr z6tANAE{fMuypH1gDc(r&gA|ugyp7`MPKx(Xyr1HO6d$4ZD8-Lae3IhR6hBGv(-fbh z_yvkzruZV!*C>9I;&&*1m*Nj7{+QxVDgJ`uuPFY8;_oT`k>Z~z{*~h2iC;oIk!bvK z;$?}KCtitoRpJriHHaq>PbOZEcthe>6VD*tlz0o`t%Py7Mmn~BGXZzaBi_-^9+h#w$+nD`^aj}bpX{1ovs#Gi^1f0p?3#9ty_O8iyg zZxDZ*_#cVCPy8d|pAi3?_?N`LCjK4qABg`%{1@WCQIbH(rIcJo$(59pqog7wRVWE4 zsZL2PO6pKjmy&1$N*Ym;PDv9=np4t>lD3p|prkV;-6-in$+eX9q2zi>22e7HlA97Mll6jOYycn30B}7YZrQ~)>R#LK>lDjFnmy#k% zHc+yKl5LbcL&Q9XCEru>16wMvr5ammu%#PY`mm)RTW(>?bhgZ7%Ux`_pDhot zb*qXxDu57)It$o=#ldX%`8fEJlwm#3+7uotV+b&_- zrEF`@wrkkdlWn%LEo|GywsUN|z_wS}UWV;gvHjxrpY5&L-i7Vm*gk^o zp)*wKp}z1fk+jv4Hj#g5JF z*ujon?0A|TFSDbR9e-g*^l$9=nVmJ*S&y9!*x8Alz1Z2Co#WV9z|P6+T*uA_*%@c& zId)!P=d0}ehMgCE{KBrf>`G@>V|HD~t{d4km|fG@HJ@Dz*|mXPTiCUY=&rNudXZf( zv+FB%{m8DL*j=67b=h5?-Cfw-o85ibJ&oP-*}ah6o7ugC-MiR*j@=j7{VKbEW6x#m zxq>})*wctTY3v!!p0Vu7Vb5~*tP!>6ZuT5z&k6QC&Ym~e^FDh%WN%saR%LI%-X`pA z%ii|v9mw9{?9F8FEcPyDZy|ekviBf+53~0L_P)m6H`x0V`x4k!hJC5*YsS77>>C_q z-$?e2X5U=)#n`u$eGjs4C;N7@?j{`SwU@8aZabN)lwsK$}2Oi?UTO9b110QqXcMe|8 
z!7Dk~oan(09PGrw@f@7Y!Koa4fP-5(xSfMfbMR#jmU8eH4qeKj5Qmy@s4a)ub7(Y& zayfJhht_atJ%=`M=mdwJ;?Of34so~whbwV-Acu!@_+nT1;afSpn!|T-_z;JWbND2O z-{bIS9R7kMi5w}E=O+X$lV-S%aP+8Im?l!Ir2G2zT?PW zc{swu$vj+_hkNkw^-&(~&%@JscmWSD;^8NF_&FYafkz@dlFTD@d1M@q6!6Go9(jyM zp5&3IIGV`O@*J(m(a{{u<>)OOeVC(Y^x{XKo^XLH{ z{hCLA;?ciztP{t2ajZATmT+t($5wHylw)sm><>IvpT`>WSW_OG#ACC0Y%Y&I%41LP z*pnQ`@hdo9mg9XmK8WK(IKGbK7u!LP$2tBM$3Nuw$DF9ei3XfVjC$awty@&#PqpQ#i~S!@UBgo=dFo!CTE|mw^VCN?^(UTg#nYX6x+_l?^7IOxUdhuh z@$?%!{T9z$%`+`{rWMaD*g;m}hU}*%d^eJ;$>b zc=lD!UBS7^oU6*Yk(?XPxm?aY%DE>v_ax_j;kiqBF2r;FdG2PO8_sk0@!V#fE9SXB z@!Vf{?i-%(#`Ar6z8}x;8Df@m*g0lovner3SpzgqND}(o|lW$4d)%=_oIq=A|>d zoWRRh^72)@+&{|8H}mpvUf#mXdwF?3FaMqMWjLS6`D-~pfb%zUeii4}aeh7Lk8%DC z=g)Hf8O}e)`Bymq8t1>|{BM+^G=b7fD6K$gB}yk#I*rnqlt$-JI-k--lonFDl+xvt zuAuZzO4m?&52g1}x}MSpDBVnHoYJk7?x1uxrTZv7KA^sBD3M56w22mYK|CN~UINU_pEciVIj4Y!RfWYXMP;uqYBiQD7j# zMiCGJK~MxiI*SdAL=Y4eK?I%DNlzxzlIc|t<$dz@-Cxe<-p@JrA2{dUv*-%0n~bl*$&19U%3 z_fP2lIo*%Z{VTeEP4{o;{w>|Vr~6sDpQrmTbpMs^)pW0;yNB+6x(DgrL-z>XvvmKR z)rWSm`e;_alhyBG^|7ozp4A^__4inP53BEE^@FT_nAMN5dL?V#$eIe)EM!d$YZkGl zl{M|G`7~=j&zkdDvy3$tvgR_@T+W&&So0if{>+*eS=04b*7UGu6Km(Qww|?(tX<67 zR@NTO+BdWIY}Q`L+Ap*AtE|0*wO6rr1#5rH+80^-SJtj(?ON7uW$kv>HM8yz)*Z^a z!&rAX>n>;AwXFL#UF&XO-A%09%(@}g?P1+0>&97sIP2fT`eRxDKGuJL^*6EpN36e_ z_4l&=e%9~h{*(2uv0)!J?8kNz{We+_#-y{n2q2WswnoYl9)6;Bvj!l1N^FD0u zDq?d9o6Fd|fX#JmZea5nY`%caUt;r@*?cjZm$UgwHm_pyvuu8j%`dR|MK-Tza}QgJ z*iyrmMQmwcOA}j`u;n1Oe1v!0CBU?w<`U+cLW$SBf3)|kv zwgR@jn{6lPvh75+ox-+{vhCAs`wZJ2V%x9Swu)^}vh69h{fTYQvwc6dm#}?4+ZV9C zlI;y_Z({ow*?t+@FK7GL*?twzeVXmFY=5nb9bv~Y>{!Z<|DssMs_x{^GtRwW9NnJ z{0ck2%FeH|^D1^e$#giMoUUERuxly1 zj%U}$*!2l^oyxA$*>wiHKF_W%vg=Fi`ZBw|%C7IS>j`!}&#p;!{e!;!=_{abA$`^K zHPhEhUk7~$(swX@N6>d9eMiyvPWs-XOW#uZK2G16^ev3>@n z{m0RNKK&QdznuQ7>A!{kTj{@x{(I=ZpZJL%s= z{|No#^iR=0OaIFZ?8CtR47{F!H!)DiKvx9=^$fH#a2NxJGjI$8?`Ggw2HwZO2N^hp zfsZlp2?kDO;B*GgW#B6eEN9@`419-y+Zec=fjb$vn}K^7_z43)XW%gge#OAA8TdT| zd;PzGbqw?}(8s_i0}~8PGcd=%D-7<--~kN2fx!X>ix{kAuz|sY7(AH4BN#lA!J`;_ 
zCxh={a4CZ)FnAJ!A7Su68T<@`|Ha_N41SHlz5f5;3I?xc@LC41XYf`Af56}!4Bo}y zJq$j=;7SI6&)^>!e2Kws2G=sUfx%4-?qYC&!QBjwFgVW8n;3dCLq{`oHbY-z=&KB^ zVCb6+-O$C*cNzK~L*HlUhYa1v(1Q&94?{m?=obtSkySLt7Zy&QLEy{R|B; zG|A8mLw{%J9}NAA-3!@$G`p9w`yzICvHKc!-^}j2_xAU&`w@2kjNOm2dnLP9vHKZz z|B>C#vHJyfzr^m{>?vVS9ea*t&++W}1bfb8&ocI0$ezpCa|L^@WX}rrT+N;v*>f{{ zZe`C8*mDPaRbnc)Ovk99Hf3?q9OnPzk!M&~hF#b^zqiy3WWw3E?;7(Im1w=wz- zMvrFnU5x$*qn~E<8;st}=x-Q(meDngZe?^2qa%#YGWs&3uQK`?WBV|+*LWT)WUPd- zGR7(xt7oj8u{Sez6l2FQ_CCfw$k_3WoygcJjGfBZ>5QGh*x8Jo%h;C~yOgmNjNQc8 z&5Yf_*j!`S_dJ;d0fjIE?=Y!zcqGWIlM&oQ=^v2BbEGd9ZD9AmFA_D{w!zAxhi zj2AIp%6K{Bm5etq-of~xj33STI~jjJ<4YMof$@_V{|Mu!G5#6G&t&{_x{Uu9;}!MxjQ24<%J>B1(~QqC z{t6TOGI0PCZ(yQ;i6SN{nb>>(HE|#lZ)4&}Cf>uuaZJ3QiKR@Oz{Gzt@ku65W8yPR zoXNx$Ox(}JFPYfN!~hf1OoqvQnJi$kh{;kW%bBcXvVqBFCR>^8VDdmFyN+Y>3rt?h z0_BbiRn|AK9%XynLdN*vzb1Z=`ZOr{bi=V z%Jij7U(WP5nf@-*cQE}h(~mH{lIc}UKgsmdOh3c)3rxSnbT`v$ncl$k6f;%Kv@vr$ zGaqB-3}!yh%-5K?l$rl#=9|o1!^~cLedY#czR%1LnfVbjKW64WW`4@dDrSDq%-@(< z&CF(IwlTAlnLcI)nVDc_nwdFfUSZ~+%pSn(d}bFh+sW*K%pS(<5zHRR>`~0Vli9BK zGrN@86PP`T*^e-L8nd5c_JX|xX0KrOYG$ux_IhS-WcFre?_l;WX76G4er6wH_EBb^ zWcH8Du4Z;Evs;>=d(m^B=P>Gy5vDuQB&}=1Q5XVXmFIPUhai+*_G@ z8*}eq?r7%T%iIT;`w(*KE>R*%w5Rba^|jN?po%qXYNMkZf5RQ=I&zd9_H?6 z?jdh7_Xu+DOAn`GVzGVfC|?=+eB8JTy6%=?kddqn2_RObCc<~=6!W~6W*DcnyAUnhlckiw-> z_;D%xq!gYeg{Mp5{Zjat6t0xQC#3KxDg2!jJ}ZUKOW|LnaJ3X})Rn?sDIAuLRwqMu38qf)d|ik^_7r=;k2 zQuHS&`kNGOl%fGC8j_+>DVmU?X(@VHie8nX*QEIMQd}a%i=?<-iWf_9n-q6S@xfAj zs1zS2#a%~9@o`dof)t-9#b-9xtUQO6e(5 z`Y|c}gp{5trKd~j8B%(-lwK^Qmq=-su9Pm9(yOF&g_K?`rPoO5x25z!DScc@w@B&B zGQUmczfb0$Df91>`A^II=Vbn$W&VpY|8FvXtIXdn^Cx8fw9KEA`LD?Qe@YoC+gHjC zkg_*OS(TL4bV=DFDQl3jW+`ixvJNR*B4z(3WyeX`$EEBnDZ53=9+a{kDI1ouSEPJj zDKD1tQYkN&@=7VMmhweX-YVtoQhtz>A0p*%k@91t{9RIhoRq&`%Da|I`H50~ij;p$ z%1@W_FG%?prTii(zgWsIk@72~{7NZbA?4Rg`R!7Ehm`+R%6}o{zm)RFrThsge@e=K zC*^;T@;^y=kCbng@@-PSQ_B0Kd{D~wNLTr&luyWl5?SyTS#X>zI9nF1kOdFRf|q2$ zh%9(T7Q8AIZ<2~asVI?(GO4JLiYlq7m5O?)_&2FIL@M4Q6>pV_Bc$T(Qt|Inag0=) 
zB^BS4iXTYD)4EczMJgkeZBluNR30joZ;{G(Naaydd9+l1P%1wpm7kKzPfO)zrSdGP zJVz?elgjg@a+y>vm&z-pa)nf0EtS_w<@Hi|qg383l}||Jc3HT;ENqp9Cv?feFUrF2 z$-+Bj;RCYpm$LA2S@=6y_!n8&Eep5E!fmo}P!{fyh0{{?dZ{XsswSyANUBbds!vJP zX;QUJsxFeMi>2xksp^udy|xkYMLO3m-3=8sbIlGJod&04A1AT^t$W|!0qNX>4k8IhWCsoh6vJEZmqsXbn5 zKPk1RO6}=Vdxq4WEw$%L?WIzCwbWiCwbx4Rby9mzm(>1TY9E!_l~TJ(YM+$ar=|8U zQu~tB{#9!KCbe6ocDvM$N$sT6&PeUwrS>0E`!87(S+t)lnlFnM$fAX^s74mm$)ZMC zv{)9k$)dN)qEE=8&&t7OrSWznBx(VWy3N?ob@p*%cbtyQuiIHyGiP9k-FQY?sn;_ zyI1P&le$Nx?w3;cxYRu%bx%p%@1*V}sr#GMt&zI*Qnyj+wn*J>sT-EM8L9ib)cr&1 z{w4L1`Zr4bJgG01`uS47K#>Q9&Y z&r1DSQh$!rpC|R_OZ}x%-zD{zOZ^p6f1T7{FZH)b{cTczyVTz)_4i5rgHr!LQvXw_ zUnljiNkg+V93l;eO2c8&a733h94QS)Ny7)EVW~8HNE(iphO?yMOVV(WG+ZnVmq^22 z{&H#fmNZ-|4L3={Ez)qCG~6x?4@$$s((p5BcvKozO2ZS<@RT(CP8!xq!&Yh7BMl>a z_f1N}j5PdR8vZGbq;X$qERn`4X>66oc4<6F8V`}iw@Bj=(s-ma9wm(*md1}uHv8ZVQ^E2QyCXZod{`PEk;b1%yXA5o!9YG@U6;pOdEllBO?6(*@FWn>5`gO%F)Z!_xE< zY5KV|Jtj@7r0H2{dQO`DDov|(rD>fs^+?lpY3h}xK4}`1<}ztsBF*oX<_}8qhot$# z(tNTspDxXxmF8vAe33L?EX|ilbC)zPm*%Ua`5V%Fi!|RR&9_VQozi@_G~X-D4@mRF z(){>d!5V4aF3r=jxL6h+B#YlBi%*fo=gQ*CWbw7K_)b~;ge-na7XMBb|3Mc2NftjZ zi(i$N{iWq~(o!fbCDKwREfvyIFD;$Ya*(tfA}w!|mXArxr=+FpQfc{bY5AtKTq7;t zm6q>G%lD<_9%=cxv^**;E2U+Xv^*m%e~^|}rFDO4t(Mk$X+2C@kCN7Nr1cBZdYiP~ zC#?@i>(8b2F=_pkwEkLJe;k&N7p-~<0$EPw{#pU9q*Hl4@t*|rQ>AjI8{2% zmW~UhHL~>UMih8NauH@^Lx_yed+w6blxeQ4@l=j()n}g zd`vojC7r*P&fiPtv(ovzbgq`pb<)`*ojawoPdW#sb5c5AlO>TQ`^l2m`9hY|%aTS} z(kx3_Wl4uDIaHSXA6as^EP1;udABV2yewHJOKz7X_sWtNWXa!T$($Vc205@$4lMhB VPlD~Y&;Me@&;A$xf91dh{}+ieUXB0& diff --git a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist index d8e15f635a..556fc539eb 100644 --- a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist +++ b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist @@ -8,11 +8,11 @@ ignoreCount = "0" continueAfterRunningActions = "No" filePath 
= "field.cpp" - timestampString = "372274896.176083" + timestampString = "374955033.430214" startingColumnNumber = "9223372036854775807" endingColumnNumber = "9223372036854775807" - startingLineNumber = "83" - endingLineNumber = "83" + startingLineNumber = "98" + endingLineNumber = "98" landmarkName = "field_avg_neighbors(int index, glm::vec3 * result)" landmarkType = "7"> diff --git a/main.cpp b/main.cpp index cf2ecbac8a..84d7587730 100644 --- a/main.cpp +++ b/main.cpp @@ -58,6 +58,7 @@ using namespace std; // Junk for talking to the Serial Port int serial_on = 0; // Is serial connection on/off? System will try int audio_on = 0; // Whether to turn on the audio support +int simulate_on = 1; // Network Socket Stuff // For testing, add milliseconds of delay for received UDP packets @@ -96,13 +97,13 @@ Hand myHand(HAND_RADIUS, glm::vec3 box(WORLD_SIZE,WORLD_SIZE,WORLD_SIZE); ParticleSystem balls(0, box, - false, // Wrap? - 0.02, // Noise + false, // Wrap? + 0.02, // Noise 0.3, // Size scale - 0.0 // Gravity + 0.0 // Gravity ); -Cloud cloud(0, // Particles +Cloud cloud(250000, // Particles box, // Bounding Box false // Wrap ); @@ -120,7 +121,7 @@ Cloud cloud(0, // Particles #define RENDER_FRAME_MSECS 10 #define SLEEP 0 -#define NUM_TRIS 250000 +#define NUM_TRIS 0 struct { float vertices[NUM_TRIS * 3]; float vel [NUM_TRIS * 3]; @@ -394,6 +395,7 @@ void update_tris() if (tris.element[i] == 1) { + // Read and add velocity from field field_value(field_val, &tris.vertices[i*3]); tris.vel[i*3] += field_val[0]; @@ -406,6 +408,7 @@ void update_tris() field_contrib[1] = tris.vel[i*3+1]*FIELD_COUPLE; field_contrib[2] = tris.vel[i*3+2]*FIELD_COUPLE; field_add(field_contrib, &tris.vertices[i*3]); + } // bounce at edge of world @@ -734,7 +737,7 @@ void key(unsigned char k, int x, int y) if (k == '/') stats_on = !stats_on; // toggle stats if (k == 'n') { - noise_on = !noise_on; // Toggle noise + noise_on = !noise_on; // Toggle noise if (noise_on) { myHand.setNoise(noise); @@ -759,6 
+762,7 @@ void key(unsigned char k, int x, int y) if (k == ' ') reset_sensors(); if (k == 'a') render_yaw_rate -= 0.25; if (k == 'd') render_yaw_rate += 0.25; + if (k == 'o') simulate_on = !simulate_on; if (k == 'p') { // Add to field vector @@ -808,12 +812,14 @@ void idle(void) { // Simulation update_pos(1.f/FPS); - update_tris(); - field_simulate(1.f/FPS); - myHead.simulate(1.f/FPS); - myHand.simulate(1.f/FPS); - balls.simulate(1.f/FPS); - cloud.simulate(1.f/FPS); + if (simulate_on) { + update_tris(); + field_simulate(1.f/FPS); + myHead.simulate(1.f/FPS); + myHand.simulate(1.f/FPS); + balls.simulate(1.f/FPS); + cloud.simulate(1.f/FPS); + } if (!step_on) glutPostRedisplay(); last_frame = check; From 95b106b86724ce76a77e5264f5c5fc5ed5c647ad Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Sun, 18 Nov 2012 11:00:21 -0800 Subject: [PATCH 009/136] Removing some of the triangle code in main.cpp --- .../UserInterfaceState.xcuserstate | Bin 101260 -> 101257 bytes main.cpp | 63 ++++++++---------- 2 files changed, 26 insertions(+), 37 deletions(-) diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index d1f81ce64c67c4f45d200e9d7b00025121c7c034..1cabcd6b99353d42e0982f7cc429127206fb4b0b 100644 GIT binary patch delta 7635 zcmZveX>=5Xq5Xfq%uEu;StpapI+LLaiXiTa`>qJCR0J2)YE{&#Sf*5-{&A@b zPti)HQnh-ptqZTHxVO(%pRHS4OZ92BZq+0+*=7>5z2o=)$9wK_?w5Pcy>(;p`i;d; zT+&m2!BgKn(|FeE`EwTRJ*#ET?70hmc*g2Ev**s6`}9pcsj|ogmtAt*^JlJ}yT`m) zEpz70efs4>X{2oW`n93`*M^p?EqZ6;d&NHPL)(8;;)Y=8g2(26S-P<bu=-C224@1wR z(6beKehod_q31d1c>#J}hMwO+&zsQmSLpc&dcJ|4Ug*g~&nWbaL(dfS7C~tq4xynJsElzL+@$OdnWXr3%wUW??upi3G`kDy|+N` zQ_%Y|^nL}s{m?rFebb?@7W(R;uNC?_pf3r1Y3SPx`sPF5KG3&6^c@6!3!(43Wa#@3 z=-U8&JD~6P(Dx4XeE@x5LEqQVHwb-s=o^K;ap;?ZY$;^RAX@?1D#+GAHU`;F$nFl= z{UCb)WRHOC(U3h3vL`}z5oFJT?AefA4%zcEkX;Gc%OQImWY?X)=hU^x| 
zJ`CAMA^Rj`w?TG0WS@iV3y^&cvhPCnL&*Lcvfn^<2(ksp?t<(DqmNB*>itxuuX>2DufGJ0EfvLhcI4t%lq>$lVFK z4UpRexy_K<0=b7F_bB9^gxogBZHL@*kb41guR-oT$mISBxlbYY4dnVD*AKZN$Q7U; z=np`DG4xM^{xI}Mp+5or-OxWD`WHa|A<%yq^dAZR$3Xw_(0?lQp8@?#p??|luYmqF z(El*>KMnmKK>ufC=y+0SsLSLqCL}8)4`!7}^9wkHOGZ7}^Fy+hOQA7Bx+e+=Z0hx|#9KLzqjA%8yPzYqEAA-@LlH$nbp z$lnI}J0QOS@|z&P8S+~o|8NHKk3#-Q$Ug`9-$DLu$iEBu4

$@*NnN4I_KP$UGQX03(OM$YC&YB#ayb zBgezYsW7q(MlOMo@59J7FtQp(*1*VG7+D7+x53CAFtPzg?tzi}VB`@Pc@##r!pN^+ ziibh0&8>bPH*mW}O zS`52RgF*K*j^2fM!2PQVy27J#v07@G!T9Wb^VjLn9zJu@&i55|swv1Kr}0>;jV zu?u1BVi+sHI50j9#*;9;u4s+3?{CHiPbQ%1}4_R#9c6P zH%#0M6ZgZ!&tT#oF!3*#$ic(_Obo-s2uzH@#3W2Mz+@XtcEDs3CetvvA50zylZQZN z@-Ub@0w&*s$q!-j-!St8!_+L8+8w6$fT_J; zYHyg@7p4w?se@taP?$ObrcQyWB`|e5Oq~f+cfi!o$S}1TrnbP;Lol@yroMowuVCsM znCeAZk=>Cw$XsL|axroZavkzRB!g^2UO|3`yoUS{nLwwbVKjnPp@*Z3(8cI!8T1VF zc61Z^0Qw;M3-s^k=jfN{*Jv-ALkF=af<<*$)PO}XENa1`y|Cy=EIJvBPQjwZSacc| zory)?#iEO`D1$|JVbR@KwDDW~b1Zrsi=M=y7qI9xEczIWzQCd>3`8-|ih*_vbYY+y z1K+{GYz!>Gz`htb00ReOUbwFVQ>xx_aS3&KMWp-!9y^3Dh5x-;29WPiosb1gZE?b0SrEf!QW!=84Ny)!RImf5(Zzz;9D5{ z1cRSp@XHJa|Bb-`4DQ0<1ct!SGz`^Zs2M{Y7}^6vb1^g@L;GN8e+(Ujp|db_A%-r- z(4`o<979)P=m!|ufT2wodH_QYV(1|ZJ&U0o7}mLp>PEVrT$EQ&=n(2eG&Wi>G68ITkl!aR(OffyHyNcmWpghs6hC@u66J1Qs8S z#oxu^tFicgEZ%~}zrjrLlUV!=7Vp5~7qR#iEdC=F{|^@b8H?Y+k|37Ev1D&7IT%Y8 zV#%>sasrl|j3tY)|vPHkR#)W%ICX z0hS$(Wk+GzF<5pymYsxUi?D1lmR*Nso3ZS5EE~kKaSVqrT#n(H7_P@~D~3}T-VMWt zVE9lBABW)+Fq}CV!>3^Q91O3(@D&)o3d7f6cr}LCV0bNt*J1c34Bvy{`!Ku-!<#X@ z1;YV#Q9Z_!KL?!HN;A*hR*QDU1|hB!rRa7%9g{B}QgqB#x0JM)tzUe2nae zkpnSu2u6;;$k7-%4kL>&au!C;!^jmFxe_DSV&r;^WH7Q0BR65>W{hmW$R>#84@_MXX zgOzKsavfIQiIp3$@*b?b7b_pc%3omRKd|ykto%1t_F!cnRu-^w6szW7)d5&_5LPY3 zs>87Ac&s`RtNtHms_w?BjacT9g(!Ky4)^<&j8tQyDaJ+S&f ztUeg4PsZxSSbZ8+pNZAq#p-jg`dqBO0IM&;>eX1i2CLU%^*XG+39Hv*^{r&A{s~s! 
ziPeu|^$x857^}x{MmNql7H6!)8IR$NS8>Mw;EX@vjJI*ddpP5tIO9{CF^DtrIAauN zjN^)<(He}_VzdFHF^slgG=T#qw9Vf`YkUxoEQ#riE+ z|Ly+kpTYVaSpR#he+%pXg7qI`{U=!eHP-iHeI6U8V?z}-bYR15Y&a1cmSV%%*l-0l zT!jtSV8d!`Sc45~v0)uH+=LA`W5aFO@N;bV1vWf_4Ub_%=5cKJ4K_T54gZA=&tSt2 zZ1@{Ce2xvH*jSE@P1raW8;``sbFuMSY`g;-@59DT*!VMSd;}Yx#>VHc@nvj$9UI@o z#y?@>->~uT*tioLKgGt+vGEJcG-k1J02>Ff32Z9DrfJv|#-<21Rbx{`06qg|VYCb`Hia#n@#STZOT!F?KD+uE*GVjNO8< zTQPPU#(s*iO&HsXv0r2CNsMj7*mjIPhp`th_HqVeZ(;0RjQtg3f5+Gd82bogpJ41W zj16LQ9X5Ak^TF7>44c?72{0@w7!1z5FzYpV&VEj>xKZ)^e7~hWZ=P(n00pqV>{B?}KiSf5E z{w~Hp!1(7FAHet!#z!$ej`1mM31UkLwoJ#CDr{-SmKJPj!lwl%_i8f5k#>5_&*b5VTV`5)S9Ds=6kb_gNc=x zxC9fIVPX{~uEE4=Osv7gotXG3CVq*D$1w40OgxE+ZJ2l#6VGGfB~1JY6Mw_RzcBGR zCcef*FD7!B7{Ew!MvQUtv4gJ`LN;u)PA?tFXNp+gq@GcWmDS+xNouy|H~?GPWOp?FVD~q1e6% z+m~Sb>DYc2wx5md%d!1DY+s4(S7G}d*uDwdpTPDvvHfdo&tpe5c64FKKG<;-cASMB z*I>tL>{x>xYq4V;cHD>^&tS(Z*zvmzcD#Wdf5MKpvEx1L_z*k3#E!4AqZd2!*crvn z2JBpborhrOVc2;jcAkWtr(ox)*m(|iUW%QUVdpCByc#<**tr%vpTW*ou=9QF{17__ zuyYK%GM(6!#;zsUwF0}&!>&uQ>vHV661#qYUDsjP4cK)fcCE**Te0hQ?7A1b9>%UG zuc-S}Ff|WTdt+*UOdW)&g_t@KQzv8UR7{B z>^=m$kHGGuvHLjeJ{h|gWA|y;eGztNR$=!#?B0ys&tmtVvHKnDeh<6 zOeZj%#PnQD?~mz&Fuf4dhhzFkOfSXsGEA?)^!b>+5Yrc9`btb+jp-j^`bU_)0n;~P z`gY8u@51!mn0^4$4`TWuO#cegPhk3KOuvBXH!%Gsrr*N!yO{n4)4iC^V*1;1!UVy&ML-P5uCLEXB~jER^qJ7an?qhwFzgvinIQLvvv}2*2f#acZ+d9`9FxEfOh}@ delta 7640 zcmZwGd3YNH!N>9Mw@KPU(cnB$rwo6`x7 z7a0r(D3?PNIYbed+^6D!%IS*?5dpc{CcTnuz21la-~T_)@A>}oyLwycHQP!bT-DdO z?Cr1p>a8=@?lXJlti5|%_MSC!-*u<0-DmdP*)t#7)R(G=o@u22t<%@;J!kHIy)FCf zJ@cWb3gwZC+Uqui4&D$tc|+*%ZLgGipVvKlWtlewL(5h?R-f56Eo@iGtXYp98g^Q+ zsq(G56_rodt*pDCZWW7bZm#^-hS2&Af!8;LzVqleqdvFCwg;z~eDt_R=U1kdy}it% z{@=XPIm;fixoH{20PJ^E3Xzgv)54}F`V?v1UjzLQLI1PR z{|WRD!2mE&0Rs&%&08sHw;Wb7Rc@a*)U`ykgb7i46-vI+XdN~kUbc(heGzt zkUa{r$3XUY$esk*Ga!2=WY31|c^SyAfb7MPT?^UoLH2scZiVcvki7%4+aY^5Wbc9O zZy@_HWOqRJamYRe**`$`HORgL*-s(c2iZ}`79l$Z*-6L=xji9Q3ArfbY9Tiba+w6= zx*@kW2=Rod!$bB7ht0DJo$bAoT*F$bAL#VW<*@qA*koL(^a=0Yj}Y)B!`?FqDR&1u(P3?B=_C&2I$7+wy;t6_LO41WiPH^cC?FuVnZx5Dr? 
z7`_9Bx5Mz=FnkXT-v`5gfZ@yl43EP|JB;iFBL~395ioKBjGP1`XTZprFmg7GoChN- zVB}&LSpy@N!N^(|xe`Xc4O`8$lf z2APpJVPp_S8(_2(MvsBfQ(*LL7`*^SFNe{!F#25>{XUHT07m}@Mz4d>n_=`PFnT+T z{uD-k2BQza=nfct4o3e5qp!f|KVkG882vYlz7M1Sh0!dG4v}GW6h@0MItKY5tAio&$OCf(gl&azX17HApaWVcS8PM$o~iOA42|P$PYq(1o8#Q z?}q#Y6iT5Gg+e0~x}cDP!ah)#4TZT-I1mblK;cLzd<6#jxvC*mWB0IvaMK3%fpnU3u75gk58>YXWwc!tQd|eFW@20d}7RyHA1L zUxnRg!R~WlcMf)cZl_?Z1ja%zRt{qoFxCZQGhu8W7@M7evAHm|5XR1ev2$T;IgG7@ zu~jg(3&u-eyaL91V0>>FKMuyvgYnfcz6QoGgYmU6ekF`=fbm;l{B9Wk1&rSd;}5|2 z(=h%VjK2uuFTwbqVB#2<$eaQb=fK2yFtGwAE`W&(Vd7$#SPK*DU}8N?Y=nueFtH6L zZh?tgVd8d}*a;IK!o(0vjKD+zCU(Qb1WW>x%`n*klU*>`1Cx8f1u(S`rcQ>bQ(k@@L$l}*I*KLLSkj0k%~+Dal2$C4gC$?Vl9RCHWGp!aOHRX*GqB`L zELnvm8?a<6mTbe4o3SMGQ!IG^OCG|Kr?KQEEO`%0KE{9;sK-D%20Ag2!ay$uzKDT+ zFmNCS4#vPd49v&CsTep714}Tl6a&jJa1I78!oasMa0v!BV&HoixC#TCF%!5J16wd~ zKL#Gbzzz&Nj)A8z@EQi*z`&arcnbqNG4L)1`Y@2iz#s-DF$e|&7%au$9vBQ`FowZ; z3^rh}1A|={?7`q(7@Ud0Sr|NsjKM=Nco+r`$KY2nxCDczWAJMjJPU*8V(?-NuEF4? z82l~r z3|)eu%Q18#hHk^q9T?hY?~mmNVflP4Ux4Kcv3wDhFUImyvHUD7zYxnW!t%?od@Yt=iRBxx z{34{a8_n6^$9JXvT^ZR`g=U7qMa=teA}zbFtz;toSlk z9EBA}W5sb;aUxbM#)?z0;@enpCszC!D@L(m62oN}j$pU}!%Y}&$8Z|MGckNPh8JM? 
zSPUPJ;mk=GJ{iMjWB6PQe+$ExV)zOSufySov$L z{4G{Kf|Yr!3Sd`027!e~ujFe#{jFAXNYB17(kt9ZXFfs=t`(xw~j2woM z!!fcDBa1L{EJhY%WGP0@$H=!ZatTJ(V&qDUY{1ASj9iV8YcO&nMsCB%y%@PagOT50 zSaux16;T!1y}ux35hY{Z&PSaUVjY{r^v$yjq8*4%(K4`9ucSo0p% zOkiy<)*gekU&q>evGxV5eHm;2hPAI^?HgG8AFTZVYe%uRh_z!_JBf8-T@dTaux?MR ztHin)Sl5DetytHAb=_E(&S2daux=*S&BD4xSobxoTZwf)#<~Zv?j5YlVcjIg$}m=k zv1u5ajLj`{z8hOs9x_6)|J$Jp;N_D78U1!M1E z?B5uBA7lT;*e4k4!&nw$Ll_&yX)&BO7cgAWnM?rw!t?NvsF!BUoR9 z^)ak(!1^YvpMmvBtZ&EqSy(?C>*r$qfmnYq)*p)XU&8t$uzn@h{}0yRf%Pw7{d?FD z!iHXKm`%opIoL248xF&U`PgtcHXM%)Ct|}{*l;d3EXRhG*zgT(xCk4*g$KZetv#Oc4s z>964QH*xxVIQ@N`-iOn(IK7CCVQj3$#x88!2OAe-<5}2v4mMtjjaOjfI&55zjT^CX z6EqEnWhnJDqvF)n?u-KhRs#jT#e0j*j$gzGq5>{&28A+iOq*$^QqW;88&|p zo4=3E*J1Mw*!&}Gz8RZ;g3Uj}=3ij*z1aK!Hvbx%pU39Uuz5GetH~Ijf$k{924(hw&{KzaHbaWBg8x zKZx;%F#ZU}e~0lWF#a^gpTqcz8H~S<@qb|apBR4!1c zCNeWIF$)v>VPbzw9E6Gam^cm-CtzYRCQik~5=<<`#QB(5j)^sxxD*ptU}7C6)?;EL zCa%N8^_aL36E|Vv$C$Vc6Zc}`eoQ=qiQi%32~0eVnZ$FL_%kN{iiuY+@j52{fr)=( z;!{i%FtH00lh`7*1hHigYzbpa1Y4$IOB=RyU`sc)q_O1-*s?da9D*(Lu;nmp`4YAq zi!H}v%cafVaqev@*%biVao`%6tHDCwoG6WOjcqt zg2^Z*t1;P)$=R5kgUJIhc`zpDVRAku7h&=kOfJUcshC`X$)%WFnZe{LOkRx1HJH2% zlj|_K9+Mj}`6Eo;j>-El`2Z##!sH{E{2eBr!sN4dtuv5Y@3Cd zwk6oM9NWH*ZL6^Do7lD*+b+Sjwb*tQw*3IxwqV;{x~!>#$=zc5K9s zP1tcYc5KFuC$Z!A*zr;ZJN||puVTj=*zv#E@g8>cVMi7_hOlD{I~%bxft`n7=aJa? 
z73@43J5R>WuVUxv*m)jyuEx$a*m)UtuEoypV&^98d=fiDexNn_Vc z>{^0d%dzY0*tHtFF2SzLvFqEPBkZ~gyEbFjwb-=JB-~C?C!+wS=c=ryXRr1dp>qA!0v_Ey$HLH#qQIv`wZ+p6T8pG z?sKtwId)%x-QUITYp{D8c0Y*SPh$7e*!>)Kzlhy0VfUY~`y=cg!R|bE@51hJ?4H7u zn2KSl6;thGOm$(Z2UB}tY9^-k$JBwCnun?Rm|B3T6EU?IQ>S6-3{0JgsdF*498)VX zbvdTKgQ*{4Y73@*gsGb`^%G3piK)9V^>a);gsCSm^+!zo8B>4BVCrQ|{S#9=G4(E{ z{)4FxG4(N~1~D~^sUoJvFg1xi71&dWJvG=9!=480iDOR-_OxNoUf8oA_RPngBe3Ty z*mE@Y9EUxNvFB9mS%N*QuqSf~_Iw|Eeu_O$V9zVq^E&pti9K&&&ra+a#dHIv+cBNS z^c+mj!}NSiFTnIdOdo~mGckQOrq9Fl3QS*s>2F~AQcPct>Ghc2i0Mt3z8cflVAzw6RZPEu=>bgVFg=9n&*O^NEA|Gkw-kH# qz}_0{JqUXb!`_wH`z`Ff1$%GD-WRd=AK1H-fW80OwqmmhKlncfX@21V diff --git a/main.cpp b/main.cpp index 84d7587730..49d93cb317 100644 --- a/main.cpp +++ b/main.cpp @@ -125,9 +125,6 @@ Cloud cloud(250000, // Particles struct { float vertices[NUM_TRIS * 3]; float vel [NUM_TRIS * 3]; - glm::vec3 vel1[NUM_TRIS]; - glm::vec3 vel2[NUM_TRIS]; - int element[NUM_TRIS]; }tris; @@ -325,7 +322,6 @@ void init(void) //tris.normals[i*3+2] = pos.z; // Moving - white - tris.element[i] = 1; //tris.colors[i*3] = 1.0; tris.colors[i*3+1] = 1.0; tris.colors[i*3+2] = 1.0; tris.vel[i*3] = (randFloat() - 0.5)*VEL_SCALE; tris.vel[i*3+1] = (randFloat() - 0.5)*VEL_SCALE; @@ -376,41 +372,34 @@ void update_tris() float field_contrib[3]; for (i = 0; i < NUM_TRIS; i++) { - if (tris.element[i] == 1) // If moving object, move and drag - { - // Update position - tris.vertices[i*3+0] += tris.vel[i*3]; - tris.vertices[i*3+1] += tris.vel[i*3+1]; - tris.vertices[i*3+2] += tris.vel[i*3+2]; - - // Add a little gravity - //tris.vel[i*3+1] -= 0.0001; - - const float DRAG = 0.99; - // Drag: Decay velocity - tris.vel[i*3] *= DRAG; - tris.vel[i*3+1] *= DRAG; - tris.vel[i*3+2] *= DRAG; - } + // Update position + tris.vertices[i*3+0] += tris.vel[i*3]; + tris.vertices[i*3+1] += tris.vel[i*3+1]; + tris.vertices[i*3+2] += tris.vel[i*3+2]; + + // Add a little gravity + //tris.vel[i*3+1] -= 0.0001; + + const float DRAG = 0.99; + // Drag: Decay velocity + 
tris.vel[i*3] *= DRAG; + tris.vel[i*3+1] *= DRAG; + tris.vel[i*3+2] *= DRAG; - if (tris.element[i] == 1) - { + // Read and add velocity from field + field_value(field_val, &tris.vertices[i*3]); + tris.vel[i*3] += field_val[0]; + tris.vel[i*3+1] += field_val[1]; + tris.vel[i*3+2] += field_val[2]; + + // Add a tiny bit of energy back to the field + const float FIELD_COUPLE = 0.0000001; + field_contrib[0] = tris.vel[i*3]*FIELD_COUPLE; + field_contrib[1] = tris.vel[i*3+1]*FIELD_COUPLE; + field_contrib[2] = tris.vel[i*3+2]*FIELD_COUPLE; + field_add(field_contrib, &tris.vertices[i*3]); - // Read and add velocity from field - field_value(field_val, &tris.vertices[i*3]); - tris.vel[i*3] += field_val[0]; - tris.vel[i*3+1] += field_val[1]; - tris.vel[i*3+2] += field_val[2]; - - // Add a tiny bit of energy back to the field - const float FIELD_COUPLE = 0.0000001; - field_contrib[0] = tris.vel[i*3]*FIELD_COUPLE; - field_contrib[1] = tris.vel[i*3+1]*FIELD_COUPLE; - field_contrib[2] = tris.vel[i*3+2]*FIELD_COUPLE; - field_add(field_contrib, &tris.vertices[i*3]); - - } - + // bounce at edge of world for (j=0; j < 3; j++) { if ((tris.vertices[i*3+j] > WORLD_SIZE) || (tris.vertices[i*3+j] < 0.0)) { From ea66f0c2a83abe9de063a214c22bf9bc129da46f Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Mon, 19 Nov 2012 13:18:37 -0800 Subject: [PATCH 010/136] use glDrawElements() to draw the particles - no major speedup so far, but less code --- main.cpp | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/main.cpp b/main.cpp index af3a6c2b06..734d10ffbf 100644 --- a/main.cpp +++ b/main.cpp @@ -618,16 +618,7 @@ void display(void) glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE ); glEnable( GL_POINT_SPRITE_ARB ); if (!display_head) { - glBegin( GL_POINTS ); - { - for (i = 0; i < NUM_TRIS; i++) - { - glVertex3f(tris.vertices[i*3], - tris.vertices[i*3+1], - tris.vertices[i*3+2]); - } - } - glEnd(); + glDrawElements(GL_POINTS, NUM_TRIS, GL_FLOAT, 
tris.vertices); } glDisable( GL_POINT_SPRITE_ARB ); glDisable( GL_TEXTURE_2D ); From 6a7128243ac1a1fbc57fd52774bf9d71cb6c8a45 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Mon, 19 Nov 2012 15:07:21 -0800 Subject: [PATCH 011/136] Removed cloud rendering from main.cpp to Cloud class --- .../UserInterfaceState.xcuserstate | Bin 101257 -> 101209 bytes main.cpp | 125 +----------------- 2 files changed, 4 insertions(+), 121 deletions(-) diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 1cabcd6b99353d42e0982f7cc429127206fb4b0b..a232612957bea21e610cb0ad66d91fb97d090527 100644 GIT binary patch delta 31397 zcmaI62UJv7`~CkJ<&GMq%xxo85COx0(nSF~_6k;t6e&7`fQSX#%(3@|I*MQzHL*k! zjnNpByvFomOfxaf#Ar0#B&OIB^IL;&Lf-tp>yK+(=bmSu{p|Cceb1bGVWrdf;a1}Y zz2$!M0q3)=4dEV-nv7*f`b}+PuQxTAcA6eAJ#0E?I%Impbkg*c>1oq5rdLg`nO--Y zF@0eA(Dae%W7C(WuS{Q?zA^n~x?#F$x+P1pQEnwGvMOtGh#V$&mZRjZa=e@*_mI=% zesX_#kUT;jDUXuJ%Gq*`oG%y4C32}eO`a~#l4r|v8d0u$x2V9uQEs(u8da3D%nboQm9N;N|k9!r7}yIt2mU! 
zihZTBUfG~*RJJR1$_}MoX;Ahm`;`640p*DDl=8IljPf7ll=7@{T6tYLqr9PIla>1vfaL!GD2R~M+O)OG4c zb&I-9tydpVcdHMnC)KCar`2cF|EQHYM9`XGITK1R>cbM;Aj ziC(Ht(JS@odX>IRU#_pvSL&_3S^iTAQ`eps9{-yq{{)7Ir{-+tU!Q8^!((GYwZEj;Wo7PdzyQh2bu?&2b+hRGtCps`Q`$1q1kG-naj-8=7r`Z z=2hmk<_+dr^JeolbA#DsesFB3@|k62@6v|a2+wYY)wOw^+kN7Nlq94SAx#ugmXH=5 zZS9q1@OV#n{C2d&yJ4+yV(Evs_Ld)Q)9`4b?@{6Lgz$Juc$^X*&kK*2g~#i{<4xi5 z&e6Bu8DTPJU0Z)Ev)j?IFXkAGS=Tm4n~xpcedSf7f7Z{fUum;3cV28_a$H(n}5{PR+?53ZCWEd z4hxSX!owgu9t-VrJnKipS5*rq&Wni(j*Ux*Suk;4d~AGdRB+4!_w??gExKuw$yq!4 z7hh$YX*ZM4O%z9#0F8XPV;wsU7oMSj$tU7u@lH7fmk_ZF)s`JS#j-H^rZGE4;Sm zi=dj?BfkZ-JZpNlDbYF8dqkVg3y&9s$BRwCmui3iEu!TG)1{_FmrY*~ZMrHvUJ)Lz zHpO47&A1WS@;lQ{?s)I(rk{y6{VF`p2#+_K;%A#mF~PQUarV5VgxILy*rfQT+IEXg zhzd^X{*P+hk*OX1OM5TbAh#ge+bTTX5*}~Y{&AyY%QmuC?ZzM4d&%Ci&%MyQ!sA@+ zpqm|sndJaS>K?hh>?`}p{=(yZ;qig+_(*tsvPTY-gX9i!u<-a)cw7-4-wKZ(-NnzR zfI>NxB8Q*wIPc&c+E7BN98sHpJEQigJE?)qsK|S$51Ub4Y71^{Xdip8|KsNVM2Byl zPmfwxcu;NQdtS9?{_y$-P4YeXg=TnaZT{_Tre1O%N2*KiEj&JR$$f>#MQ3lx5a9G~ zVQAfFfFsW(4-_7spW_?a@SQwZ9wHBwhY_CVqYv(%K6>WV$+lq?6}C|YrNtv=PAe>~ z5+0X@#}~rmk|Q_IXJwO?HFxH=94%+mcKyABv|G-U$J9pu-p4dvp6JdyL3mts$yvhV z%jUeDC%N~s-Xd=m9yf%?t=hYH z+ovCpx5?Y(I(dg&FE_|L2w(kN^RMnU0UzI*K^z5qt+h(GrIL7_j-Q6hq?P_7sJuo9e?_58EELz`b1MqqA9jz zgQ1JznJv)j2SQ>K0YocwqLXAx1FO620!QH216Iq`|=0wqMsL1 zJD2>SkUX1<9(2K7^iPH4^&a2I7v<00ex%4dSN=l2BJXh~wlH*5y@g~F9yi~mjr^7T zwfv1?r2MVZ*1~ZAC?6l`9jw^o@8xURBQgh<7ZuMLRZu-;m;3`2|LnMj`boZi&*8%= zO8Zw#o;j_!yn0y0ZN2*I{Do$lesl&fz#K~9W6HPLL-7+NpDWjWPMyqE29!hF6 zf0~dw{zc#0P2WdIq5q<{E9vqT`I#pEPEGtR-ldH)SpG`>Mj5IM%k%O6$D;Qys4fta zkC4KATD(IWWrQ+P88v=HW|yL-KzQF)G}%DOP%?8zWDcKMT~Jt7>>jmS$xy~nF>XYr z`>I*6X}sdz&6Ej3>U^(oZs&%ZCMo&u?mQtyx|9MTS^hS3E4xzco=l05qW(1*#VTKs zUv8R9bkkhimiWuiN`<>t?V3|pDb>v>W(uk6U$vU!rmqoF%)jXu$XDbuj!d@+W1IL} z|C4`_o8PmUf2p#(ncr=R=Fn7Q|5|0MBXyUuS=l0_ZbC}l zr8t#sLh3H09?l^~Lt)1Ul--Wh-Abe4Qg#U`RY++<>bYClqda)uN+I za$ZQogfv`8BSL%qNYl?`g2P%6{Fk9V5mNXQ54Sd0Tcm!bTy>;Aq&%*Cu3S=%D_W0H`@3TeD^bX&tA`_Hsdeo=mPd$fA$ 
zwBn`{&G!E7tAsS}o>OlqH}9FUc$ad6iaZ~mF%?AxHNged!ErqjlY&PL_xa|YF-|Lg zI8t1AJ6ir-M%AFUais25Tc|Bn4|%<6R9mU7g_JF%Tp{HNk2^zzR3N0uLMq;?-lw)z z+o_(am+Gzh2&q&^WkQ-Rq?tmRBc%C4vb%Yk4mhfE?|UKV?=y#LcDj5F{aX9o3ruq6 zc^P^+2YMP5&kpyx@*U*)ba#KTJ1@2~_|)z{r?yo)-HR1EsD1l`!EM$1?_C!;sLlI0 z&6)0HkXv`T*H_}E%NH*sgFT?%*z0|&LKyS5= zkfsT#e3#l!?JuMXAywW33{^+C0mIyYswTilb(D~12&vk6+S}04f1En;Uw|w%TS&8n zG~2n#+YsQNuTFLY3fv4eO$TYK=5 ztg3bQf-9PXJDY-|&dI9kx))s49DK0$%}-*T7TI7LyH7phNZqIIR}ZKUs|VFX>Lco- z>R}qyQtdvX)uZa;MB}Xy`(`i6Q|eN%l)eOpLQA#D@Vb|KXXX@`*N zh14LVo%;xS*>MxGuUH28;S1ejQCt5te>%LO;H__q+ zA?*(BJvX>%d7II<)j$3}=s%m#4~F)x2&UML{|l~bEwy%z)P0(VCTT{kmDXBoqur;q z71CZI?Gw^|AsrCX!$LYJq(ee_WS{1#d1>C7k7m+jP0>^#Ju0Lph4iwJUJ=rVLi$ul zUkjs$Gse&0=k(MK-wo`bML1ITYQb8F)=>-9I%#2AxR4GD>4=aX6Vg#3JuakULOQ;e zPDE>wbmDa_nrMfQ`?*Yd!r|jc64D80Z?ob0)MQ6WBdxP#g(%$fM#(3HoMradqMr39bl$RFwtC(3{Eu`n0c2q4(%g*}mnIW^d zdY6_%#Xqin4DV?7hUW?CrF$a^v_hxJ&!9@XwaHqM)7#Gw>O8C)LYg+jrW2huMXPe8 z?$@SjR?ViBY16cFtwO8RrVHs+A-yJ~*M)RONN)(~tdQOm(py4$dq2@ywKh|mMYJ|Y zt07vO*HpfDg!HbE-fL>VFQoHLjSupC+7IrZo?cZ|Q8i#{S#@#M)bdj2n|_9u?aQ?l zLOR!YPwFadbMUH55+9I1P>{n`QT zVeO!hJ`>VKA$=~SOMA3Oh}I5kM}%~_X-Zdx^riD)fWaDbLObb5c4AC8SuwNfpNK!S zKi!DCLb_#l=?LleglPp+%ezdjtQ_@MJ=*D#-s*2d zA(O7VSM!FBuIieQZV2hNknRfU&pkR#|NNSIZoB_CVxKUos-kG-73C_ z)wFqPLkw)*Lcm1E0)#LOeN9t}p zUQh7X6NOO{Mx!vc+D){ctaoo(uhznNpR+93urR2X-sf+}=zWE;O|!goeSq6nVWWrW z!~Wjw_2I(U?!UX2KFX1@TOX}w=$XRkDU3eCsQfj&ojy)?9~%C#ZgzJm+#rhI`X;UdWID8zr z!q{IJ#|h(jVa#*R>S)+&+MsW8q`LHt!Wit*YlShy*(KBv*WamccXw|S#*Qw%P8dVS z3@?~k-aP4@`YuQEZv6qhQFjSrCt(Z|#_-+xZo>5kg)zdNsjqWSsG%tTVMm@zKPZfy z{~E08kLpi2QupeI^&|RY`ceIH{g{4S81EOxNMW=HW0WvP3u6~y?7CM!p+Bjg)SuGs z>d$z-C5$n`7%Pma!q{6#w}i2eX!)FTN+-iX(<}O``fIe&U)Rs*Z|G z3uC-6#yLfpp;yv5N8WDzJ^g+Cyf7vRW0Ej-dxtjqhx$j_-K@by#pTsgOQsfA31gy= zZZ&PH`UU+{r(d|CL(5(IXU_KFhOoYF^o=ISjsFhTuedLb$^X2Gn_jI^q<`(T(t$Ac z5H0V01?k`E-#br-8``(LMuoefx4D<-NBt+~op3|6;kxtw2tzmNSN*mlx%Trf_5S)D z{dc1EyTX_zj6Iu{vX}c|;pf(`PJQ?Pe7Wp?9h_S6s_sSldlnLW*3W^c2PFs2LR0AU;`jDv)6@Pk^TSuv{y 
z)vOEYf-nvd#$k?0!Z^ZN+}V)PHpuLLyI}4hj6 zFL7ht;>MZB3*$s#%xM;vWzKE?ujG@=dBT__jM>iMNJF51k-79=fGOsw!k8rIG02kh6U%lZDJF~Des$SnrE43o9EERTw|VV7-61a7-^pG z{4vrn(q15pQ-sm#;643s!<+49huiRk3uCD;77AmWFs3+i|61;4j=bIG<>nRUmBKh# z7>k6l_;1($W%YYE!T;LYys637CI7JX=>N;t|09?p!wC1sJl|aJz&$6cFi!nT$^&la zRSIK%ljqI5%)6c5QHH3Nd#G?XBJbS`=7-FCoztTXom=js!rcgWJ{M(BTRv<)==?Cs z5RvW=o+04b1w0eMGYvckf#(qL90{JI!7~#)CxB-bc;j-!~4_=qS z>n?b=2k)-n9RuEJ;N1(n2Z8qx@E!)0+g$ud zx&`!T(BnW)06hux6wuQ^?*;lG&_{!w4|*x+6`;=neI4jqKtBTdNzk7L{S@e@L4ODI zPe8v4`gQE+zkvQ5=r=*X4d$U>9s}kAFi!@v70hK|E(dcZm}i5z2F&xoya3Eg!Mp~{ zN5K3fnBN2Qr(nJa=F4Ed0_N*r{sr0_pnXedFG2fO(7sJOXx|pvdqR6ZXGpBUKV%rR zp91aIL;GXU{yk`a8+==WuLQn+&fHjo#oi5kyMym&@XZ3>9Pph5zIN~x;M)Mc4}tGK z@O=|}KLFp4!1o6D0e&sOFA)5~!LKv;m4IIb_)Q1D$H4C?@OuXQeg}UE{9A#4NAS0R ze>B+rW57QS{8PX`4gAN0|3vW52LB@Pp9=m};J*p{w}bza;QuE0e**rWg8ygWe-Zq@ z2mc?y|0ej~27hcX${0ad# zAmBCx{0@OW5aV!JZK84Z$V|)*-k(1p7g7X9!M#;QkOi8iF$+cmi|@&Vt|^ z2rhu&$q-x&!Q~LV3W94Pcnbt?gWx&{e%M)%U{EIBfe;S}F+qp|A$|}N03ksT5)2^` z5OP0+SRf=CLJ}ZkB81F@ka-ZY1VWZU$Vv!V4I$egqz*#%I^Rez_#2*eew|?O54jE< zy`WLFq$L^MLgE{NCz5f4GcQD83)0iB0K=NZts7CP5M z=bg~`0qDFBIv;?@SZ7T)!+HBoh;%{Z9*Eovk?znTh&&9DMhy9$-lW zOK-6B1Iqxg3|Wdc~Tv16G8mIAO8fu$5IR;29}?|@(WmQ zfaNw={(vYTswG4jA*u~TwSy>ch>{^ngQ)fpW%q}uAczWqs7??S0a1|<6%A1_5ETzm zNf6Z?qEaEM7ew`isG$%w9HJ^9Y7Rurg{VyswF9CWAnFN-dKRLdgQ%|{>L-Z$8KO-P z?FZ2T5SBOPFhWMorzZ>EYLi{5T{|3ar5Ah#B{BIEdCnOjkp(7+% zAR!tO213FpNXUSMxsb3J5|%>3UPw3$36DX-JCN`(*b^>5Vk=1WfkYV+J40eDB*sHx zAtc%$aT+A9hs14=SO84pxb@WtsQiWfo|QQTMBf`fo{dntrWTm z=(Y;Ft$}X)pxY7Xb`-juhi-0;OVI5uB)5X(Hjo?%$?=e!2+6sSTms2c?2x<~k~c&0 zR!BY$$)_OsG$j83$u}YSHgpey?h(-ae&{|Jx@SQ5G0=TFbe{{|=R@~H(ET`cKLOo8 zh3;QM_phOcAM^-?9%0a9DD)TuJ;p(g8t7qP1U;5Ok3-PoIP^FHJuX3yZ=uKckkT4b zOpu~LN)Jfs2Px^0QVuDzA*BXV_CU%ZNO=@e-h`A7Amt-S^?+1QNcD!)Zjjm=Qu{(` zDWq0HY8BX1w?V24Qg=h@i;#L2Qs06!Z%AtoX?~D45z-1Ftq9WAL7EfNwnN%ekoE$k zy##4rLE2A{_A~VKf}UpR=?gvMp=T=e>heaz6u7y6Xip-(0Bse(TLfj%!opI4!;8Ttl8-;U6C8uXn7 
zedj>mN1*Q$(DzB`dlUK@pkGVqmk#|#K)+GYZ#DGW4E?r3zmK5bW$1SW`bR_mB1`lghIAFuGax-1(sLpG0Z88m=?5VF5~P0%>EFYEFc{DU z2E@RCsW6}l22{g<$6>%TFyIsnxCH}Sz(5ZeI0Ob}!oaaGa4XmcJ^%w?fmtb%V4DJqtQ(*9H80>(-i(v3mF!%)+{1Obl z1w&fE5DysA3x*7WAwyuuS{SkwhHS&mkhfsShcM)07#ajaBVg$LFmx&mt%9M|F!XU4 z`V0&`1w(&@p?6^zFl;ak%Yb2HU|2m2dk}{0h2b7B+!Ka-!|-u1JP(E!!0`Pr{4p5* zI1K+CMo4xT(F#VS!H5AcVi1hj4kLEKh&?dkTNv>RjQ9;k_J)yzVdPL4xfw<_z{m$+ z-U4&7WV00XePJz*BFxtKpMz4d>8({Q% zF#1y%eGxK}Afp##^nr}!kg)+WHbKTaknu5OT!2g&GW{Vl5HgD)vm7!jA@ek3z6P0R zV2lTh@q{tnFlIcA$%iq8FlHx=*$ehD`(eyA7;^*0+=8)#VQdDB9Rp*Jz}S;8_GuWW z!MGq87YySj!?-dSR}SNzfN{^lxaVNJ1mnG6ybp}egz-5reiDptfbkE(_R zWSxhsk09#;WL<=;%aC;yvUSLggX~_A-3PM!L-qj39t_#TAbTuikB96W$j*c8LddRw z?CFp_3$o`x_FTxeL$-kICGPfG$gYFzddPM`_HM|25V8+I_Cd&g6tYi1_BqJD0NI~G z_9e)^0@=So_ASW11KD>W#{fCQA!i)qOop6d$SH%I3dorbIn|Ie4{{blj)0sc5Sg5WaxOy7CCIq~IbT7}uaI*Cay^`%_c!?410gpUazi0E400nN_kPH=KyCu$ zCPD5{$Q=Q>qak+;UM}R7K;9I{n+kceAa4%j)j-}{$XgD1DqU^i8uIr-{$a>}4Dydb{t3u` z74pwO{#nR>8}iRVK`0bNK|yyYNP&XBP|zO=20+1JC>R9=nNTne3MN9qLMT`Q1&vVP zyB`W3hJr_+;0P4_2MV5rf;XI|fd+s3RVerx3ciDaYf$hb6kLabU!dR*DEJc!RVXw= zp&t|mLSZlzhC*RD6y6VoQBc?w3ge(K1q#!ka4Zx~g2H?#oC}4Ep>Qb_x}fj?6dr`a zcfnrx2^4+`g?C`G2TV4?vK1!JfXOpq@^P4a3MQY1BA}=Z6t#t-a46~u zMX^v62SvT0s1FqNg`$2?Gzy9`pePrL@}X!l6qP{HRO}R$LD6g|nhQk>pvVD5i=k*4 z6di}6&!N}{ihDtE1r)D_;%!h|2gMCg+z7?Hq4*&vJ_*IoL-C7H{2CO$0mW}Y@dYUU z42r*i;xD228z}w(ihr_0@h?!)0!lO}34#&}lte>G9F!zNNivkAK}jzt=?f)8p`;W_ zDxstbN@haI94J`{C2OE$9h7W@lFd+Z1xl_#$sH*914=!h)Ci?*ptK#7+7&3(q0|>j z1E4e%N(Vt{5tNog>1-&ifztVIFqE!=(sfX}14?&7sS8T?KV4V)uYOu}*>s+ud0ILH# z*2Q352G*5eT?5wjVBG}PEnwXS)*WEo304+4`W3)Z*6dJe4T!TJ$cFM#!;9juqZdKIi+gY`SG{s7kNVEq-WH^F)b ztarg?0GkKcT7m68uz7;b2W$%1bg=n?EdXpCz}69LVPNYFHVfFgfGrkm31I66wjN+h z1Dm}!*!qEO0N4hDZ5Y@_f-M7VW5G57Y}sI&1hxXO6@jf3Y*w&M16w87W`J!L*lNHw z-%Sd(MPOSBwiRGo4YqY)+X%MJU~_`44r~ozw>5%oH`pEm+kUVe1lyxvdkk#Hz;*&` zPl4?}U^@-A7r^#1*j@wM8(@12Z0~~YeXxB9wokzJ8Q3m??F!hw0^7G>y9TzO!1fE+ 
zZh-AJ*#5vy8Bo>|%8XFf2Flt&nKzWlP^LjydnogVvLGl6fwE3e76E0EP!#=+3Qet4$9t#vLB)B7L?tAX(2Ez5~fALv>`Ap z6Q+%YY13iaT$nZ=rfq|1b{9hCJ7DHt@R8~UedZ^q6m32^g0xC~K;V z2-Cx1dS{qE6sC`X>EmF!9j32<>8oJ+UYLFuJJTP7>2JdH4`BL7F#Qfxc|esBssf=Z z9I84)Rez`&4pk$eYBp3kplT6R9e}E%P<0Hd-iE4=pz0Ht(Gq5~gBe~h<9?VC2Qw02 zMh48th8ejqW1bymEP)xzV8(+m;}Mu~7-pP>8RucfhcM$dRJVj`3936lb!VuKgz5~a z&W7q-a33?OS3vbDs6GJIN1^%{RDTB5UqSUZFcX;B24=Q}nLS`;KbUDxhnb}?vl3=j z!OYDtvjJv405ea(%+oOQd6@Yf%={H*-hf%&FsnVx@`G8iFsld5N`+bDU{)T?Du7ul zVb&&?wHam|f?3C5)(M#PAn0kcoS?Dt{zXE6J7n9~C0w1qjIFeee_^n^LRVa{ZjQwDR&Va__3HK(BFG}K&& znmbVQ2h0tHxlu5;3(Or2bF*M>4$NH&bJxM#4KVj4%zYl_z6f(K!rZUzF!x)SC&N5{ zm=_512Ee?LFmE)>n-BAr!o1}$Z!gR{4D%j?d7r?%D=_a%nC}hq+rxZ6nBNQL4}$qa zV16acuYvjVV15J4e+cI9gZb_;Kf?U$u)rP*3wpqUR9H{~3o2m2bXZUe3+iFPPFV0P zEO-?bybcSl!h#=Q!B4QTEi6=Fp$-evVBr8*I0zO_g@skHuo@QD!@>t);a*ty8Z3Ml z7QP1yZ-U(bcDws`OP$>BwZYyM?3rNCaX&YJeHYjt2Kynf-voyN94*1o9~{HMF%ld* zz_AA$4}s$?|4qi$=ksWw2;HEZPW* z-hxFR!lI91aUd)XhsB*?@nl$B28+vK@nf*~DOmgrERkS|7cB9CB_m+TcvvzKmTZS5 zyI{#4Sn@e6`39DJ2TLPiX*?`Vw8PTbu+#xd7s1k#u=IIY`XVg-1C|+KS!-C<2bK+i zWy4_E8d$akmN{YB+pz2-SoR4l?+D8+usj-;m&5Yeu)GGAKMu>Ef#s)Qg(s}gVMTjb zF$q?<|Mp|WR9NvStT+KHPQuF8u+ju86<9eER!)GGS+KGeR@TGHov`w4SosmG`~+5+ zVO21!>IkcfVO2S-s)SYhVAT;=bre?p2CM#r)dpDI2UZV()x*HPdJU}J0;`>{`VCn9 zKCJ!#)`Y;CNLUjEYpP+*0&xGrea$DZ<_fI&64rKswcTKCcUUW6?J8Ki2G*W~wHILR zXRuC#bwRK$7}iaPb#r0ed|3B9tUH69b!TCHdsrU=>qB9E8LXcP>u1CIr(pdHu>K|3 zpuvV9*bodG3Sfg3Hk84JM`6PW*l-dy-Uk~M*r>tAT-aCw8>hg=L$L8UY&-#*ykL_V zHu=J)JUeWf0-LO`=>Tjx3Y(6>rf*==&#>uNs7-^~0Z=;#YPUn}E~woDwVy!k6{!6Z zHg|>1$*{QxY@P+1?XX$E=9ggeo3Qz9*b)L;B4JAuY*`6gHo+GAX4vvQZ21kg+=Q(u zu(dyI9ROR`!`5xEwGOskfUQ?y>sR2kfHMJ{N#L9b&V}G~fb$eMUjgT9u+0eDykVOO zwvC2uS+Fe!wrzuLF4(pkwp{}Iwr^qE_prSyY)^*mJz)D1*uECFuZQg)!}c#=`&Fom zfVvo{i-WpysGALSHBeUvb$g+1Khzz7x`(0e1k{~`x)-7D6{ve1>dr#l+fa87>OOW4x7NT|<%`teYo1@*a5Z-e@UcBprMRzUq0sCPnrJ=8w{^}C>c zFVr7^`a@8E9O~bJ`j4Uh0@PoG`pZ!N3)J6$`dd){JJjEWhE~v^LW2$sfzS{P4WZBw z4h_-J5CaYI&|pu3hBRpC4-HeHVJS4MhlXv?PzMbS(9j4ChoIq6Xm}bLPC>(S(C{KO 
zyaElcL&I5U_!t^Kg@(_e;W9K_frc-k;cM9G4LdEcGXr+cft}l6=QCj6`4a4W1saD# z<0xp%gvN2uI1w6ipfL{`3!$+X8mB;`4I0a#aXK_sL*r~{oC}Q$pwR)1i=lBDG_Hik zHPE;o8aF}X7HHfCjXR)mCp5aCaSwJH_d??VXgmashoSK(G#-b>C!z6aXgmdt&q3ph z(C8NWIy9bz#>!n6&k;W#_yo<2WY$wjlV+UO=!FWjd#Ii z0GEdyT&=)$AGkch0JsK&YZ$mjf-3`DW5G26T-o571g-*b*^9tc3N9)u+o{%T#-TKA}RCLnu*EJ09~C@Yb@ zR~7^^$N+@!KECgJ{lD(8PP|`$6DU~|D=EF2 z(mzuAXG*W9^e>d&OzEwZ-a+Zzl-@__gOvWA(#I%$lG0}=eKtYq^OU|s=_{1}htfAF z{eaRBDVw>hSo83KSK}l z`Oo(U@u?!o}u%!%J&9Jo$PhxmF!#gqj1csl%@G}|y7lz-#@VglP z4#PiY_@@k?#qb3TU&Qd84Bx}>I!0tl7}1LneHd{9BhFyNnT)uG5r1XGEsS`T5$`hM zeMZb-#J7z2juDlN_-T;_M)qW69wP@Z@>h(!h>@2t@-{}^&&UTE`356DVC094oXN;} zj9idlOn@m!>ErL^(mv~FzQ=IeaEOK zMjd9<5k_Y*x*wx+8GSjU|HSA&Gx{k;zrg617(J5FiAju}!sw-pUd`yWj0wgZ&6sq? z=%m@tG1pEF@O6V@_e zJrkOl(8feeEMVgCOgw>!KT%5)Z(-uCOnj4x|6}4uOq|8U1x#GT#0DlFWa1$v_S2O8)Cf&%S7nt-Klipy`Bqn{yq}fb5#N-qvw`cN&Oun4SS2FoYCO^;Q#EVQG z!Q_cdp3LM`Oy11otxW!rDalMpWlAnnikMQ&l&hF>EmN*%$}>!Pi778LWg=5%GG!K1 zwlJldDZBWhBVTmqi=KS(3%)p)FV5$S+ev)!0AD=B7t{D+E?<1Z)Q(K;&eWbvy@siO zW$GpRfgXw=}`pr!L8`D2z z`cS41lQ6x4>3f-8&x{k8aRxKaWX5C6c$OK@F=G)kRxqQSnZ21gfSCiCc{4NbX6C)j z9LLOQ%$&i@7G@sd%izni`0`@Dyp%6r<;!>Z@_oKs&X;Qwe7T-kU6|E}S=r3`1G8>q z)=kVRVb&;SjbTvNm+&9vf(7eXuz`hxS$HA~PiEmGEKIQQ zSr*P`;ZhbZXHgD|3RyImMGvy*NftfLqHkFAJ&Ts|?Et<#hHsDK+lTq~X}(SH?Hs=S zmT$lN$vMK}G!`Gt;;UGEEsL*b@i-PwWAO|YH?a61ix2VL`FwX7-(Ajk|K_`Q`0hQv z+rW1_`L2@hPvQHs`2HNe|Ag;{^ZiJcv|~vJmULvv6)Z_y!;))RGL$7_Su&m_jV$?* zC5KshHcKyI>91M(3QON+>ANgl$kJS(QNVkRqQ zvtlkQ7PI1eR#dT~i4`rZXl2EI%2Ft6PuVS$-A37+l-)zw{ggcnPhu*%r#SQ&vG)HD$XgznJo0Q~q1ZucZ8H%Ku3D zpDDkd^1o1iGv&8Zeh1}uQ+^-i4^sYj${(ZrNy?w0{8`GMr~D<#U!nXbIYapyl)p{+ zdz61b`A3w0N_i>epHn`9^3jx!qkJOeQz)NC`Ao`ZQ$CmSd6X}td@zgSp5R4|HbN8SY4iA^%hodWA#o}SF+|f)||nbGg)&sYtCiO zE3A2+H6O6%Bi4MvnxAs_v8I_d`&o04wMnc^VeO@?{Ud9yVeNISy@9p2vGxwuPGIe9 z*3MyV;v3e^XYEqfE@xc^>-w^;AM5g1H-L4=usYsub(>jN#kv~S=d%7-*8hz4C$Rn`)}P7xvq-G}7wg|;{rjx{AL~D2{pYM7 z&iY-fZ)N=f)*oVh8yniQ;V3rziVeSJ!)0u^f(=)(;aWCa&xTSqjAz3{HcVl|R5r|E 
z!`E!s&&CuswrAtf5;mr@F^i2o*mx})Z)fA3Y`lk!_p$LYHa@||32dCr#yM>KhK=*t zxRi~{*_6VjE^O+?rXFnS#issj%4gFxY`T?Ax3lRkHr>OfN7(cjn`R~0w3tobvuPQd zRMhy3!8J;oX6&THVVkAO4u@vEfd%>nJr(ip*XEIXcO$4l&Zg&qH4$9wGffE^#P<5PB&vST(ocCw?9oxRyPfSt#)^XKe5 zot+o4^O6KRuVUx#+4(1SUdzrK*m)Z}?_}pa?7W|y53%z#c7DOmdFgOJyCEja2?fWgAsNRWeoWsmi3P7gYl!R2@&%2~?dy)vu^JhpO|bx`?VP zsQMjMf1v6bs;;BzZ&W=<)l*cxOx3@sdWWj_srnyPA5--iRU@eyL)CbyCQs5(qFs*|WrrTQqUyHMSS>O!gqQ+*=Ur%?S1s((rK zSyW$4^{=V^E!9_2eKpnBQ++Gd4^jPhs-L0yS*o9>I`IZjf^Ds5fQu7=& zuTt|mHE)rqd6%01Qd2_BP-=!#Gm4tA)J&yj4mC@tSw_toYSvS;iJGm{?4YKWnmTG4 zscEKWKfAiJ>vVQq%&z;{^*Fm;WY-()`j}mxvTFppMoZW=j$ISkHHBTX*)^A4^VqeJ zU5nYZlieNI-HYAlu=`SW|AF0qVfQ`kzK`9HvHMAOKf~^4+5J4b|HJM#*!?!U-(&X& z?4H8z4eYL=wllT86V&EYdmOcAP!;QahE}8Pv|Ab`G`QQ2RZ#E2&LvqIMUxwbV9KyPw)0 zscmCVu;*y@bYxE^d%CixJ9~22a}0YVHQ4FRA|(^%qfpDfO38e+Biwqy9STZ>0XO)c=k8+o^wm`ot5|KS%u= z)W1djhtz*UeF^nLsUJ@L1nMVKKb86!)X$=R0rln7Z=rr0^}DFwLw!B<`>1cB{s;~2 zXh@+UjfM_1bfY1ch9VkHA<=Le4d>8sJ`ESqa48L!(eMWvuA$*N8g8WFuQWVO!%!L~ z(XgI|9W>O_aF~W8G^WwmfyPcWcBZi#joCEj(l~&|K{OW8cs`AH(fGK8#u6Gw(KwaH zIW#VzaT$$kXk1U@CK|WWxP!)88tZ6mq_LUC{p<_&b!1-;_T{s05c__{z7yDYGW$+r z-x=&XpM4jx?^5<%#=a}qcTIwQx3cd6_C3SCf3WXm_Pxr!*V*?L``%^WC+sU>-%$1q zXWuCHEnr_Q`wr67pQd7(PNV5Ony#SfDw?jP=?0o^qUjczZlmdbnjWI*5tfIgjRinhR(iOrrTjn$M*9BATzJ`42STNb_H5{u|A=(|i}r57Yc8%}>z$ zG|m5@`DL2lqq&6UaWqe$c?Qk1Xr4p!H#9Gxc^S=RG_Rz24bAIm-a&J%gyv>i+R>6s zOGjEVY3WK!cUpSU(w~-rv=q`(Ov`b!oJz~tv|K{VA87d#EjQ6}3oW)W(`MC(XeN7Fim)@iiPq;)o}b7@^n>k?X*(^^jJDq1(w zT19IUtu3@3VShXJr?CGg{n!2u?C-|@9_;VU{%rQ=vj152|BU@7u>U0XpUM8S*#ABI z*Rp>-`!}(F3;V0tzl#I+a^Oi0Jk5cBaNwUD_%{ds!-4NOu!aNcIIxien>kP=;Xn-s z|IWdGa`1T$zQn;-IQSn9zQMsU9Q=ZV(>OSjgR?ofjDuAi+{M8?9IWSHBR@XFk1z4# z8~iw)AE)x;Dt_F}kNf%YAU_`F5Dq1AC^f;M{v0ai(6Jmkoc=zI=c#Gy+$bQy;( z=gR?)VWwhG#+ zXxl|wEl2V=avVpF=g5g1IhiAOa^z8tJkF7)IPwffCUayiN50|60*)-=$RXhqXLEMAFS7=~j_+he&!sB)uk*-VjM|i=_8N(rA%1RwRuRNfSlV6p=JdB$DQfq(vfW zu}E4ilGci(4I*i?NZKZnc8a7uBB@R!)r+LVBI$@oZYPpcL~@!)?jVx8iRA7gxtB=p 
zE0TXMl1~)LCyC@!Me^w)`Am^~sYt#|BwsF)uak)68$|L=BKa1Pe49wVQzSnmlK(D} z9}~$>isUy$@>?SLZIS$*Nd7=1ee4HYmq!pBrg%kD@F1ek-T0c?-0oi zB6+_^J|a@mMM@`;(pjW*ONf*nBBi%TIYp$LCsHmDDHn^BUyGFAij*rw$~_|G5s~tk zNO@AEJR?%x7AfzFl=no+2O{Mok@BfX86#50imrk0PZ_q;?RgokVJ;NX-_hIU=>cNF69r3q@+NNIgxY zo-R_)6sc#6)bm8@g(CH8k$Qtjy-TFtBT^p}sec!#kBQW$Me099BK0|u`o2gVFH%>E z)D0rFTBPn0sSP5vNu;)l)PtgZi1x{%eS6XVXwm*m(f&`O{R5)?6Qcc7qWue^{l7%} ze~b37iS}=b_MeFMpNaOvMf*{r{aDd{f@nWYBHGUs?PrVjb4B|ak=8?`ohQmuy~kv2@EjSy+$McO2h z_Jv5BF4E?Rv~NV(0+IG@LZp?6w6!8_n@HO!I-D;$Tq!#IPIUN-=y0>>aI5HWhv;y( z=y0Ft@Sy0B5FP$0Iy^5ryd*ljB09V(I=m)2Ocov1iu86O{TPvcvPl1xNWV~|UoO(G z66x29^u%99`fVcp36cJkNPj`3za-LM5$Ugr^bbV(ha!EHNFOWGCy4aPB7LezpCQs` ziS#)leVIrv6X`2O`WlhGUZigl>03qm4v}6j(i=s3vq*0h9f^+ZM55y_M8^w7$BRYB zUyF{HiH>iJj-QE+rK02KqT_JUafj$wFFH1gj?JQDtLSv9=ybm5bfM^UiRkoe(diA* z>0{C9Q_-nZbQ&r;?Gl}S%0D1FwTX-n8Ob8!D2d2O7a18MBTHoT6&V9X#&II!43TlB z$T(MITp%(o78#d`j4MRO??lEmBI7S2;|`JWpvXvwjAup0iz4G?k@2d?cvED&BQo9> z86S&`VIpI^$e1NEz7iSpewwsUWGogL%S1+*$XF>dHi?W%k+Dx?G>eRbBIB^gBr;P( zW}3+CATm3P%-$k%fXF;vWS$^0PZgP`i_9}c=D8yC0+D&K$h=Zy{#j(+CNl33nfHp! 
z!~-JpVUhW`$b9OjfXMup$b3^|{!e6nBr+$8%qb#sn#i0fGG~j-xgv9($XqBg7mLjA zMdmV*xj|%Z5}8{?=5~>}TV(DLo%2QKpNq~Xiq5Bq&Zmja=ZHk-^F-$-MdyEs&aa5h z{}G*E7oGnrI{!~}E*G7*h|b$Y=bfT+rRZEIIyZsPej*gqU&7I^&8Q3f#~|J=(|vx*S(@^gXr2My0(h0 z2SwMzq8rhzv*^}UbjuRmdL~4-KB8Mc(JfDO8z8z365Y-c-L4YdZV=sG6WvNgw-ut> z4$-YabUP@r+Ka5CL{_@U$`DyyL{^r_>L;>tMbE|>vWNImdH9+WL+S# zE)`k5E)!W-h^%Ww)~zDzc9C_D$h!aiXa5mh6EBIZcSP2EB5SzF8YQyEimVADYqH3i zDzavXtXU#!j>uXevVIU*t3}p2k+o4|Z4p`9MOKB#sutbTME7FR{VdV_M$!El(S3;M zzEE`EDY`d^?)yZKpUOjz6wxD1^ynaZbP_!}iyqxXj~=4OAkm{x^e7fRjuSnO7d=i8 zJx&%qP7^(D5Iz1Odb}-qOcOm;iXQt#&m7USQ1l!udKQbGCyAb?h@Pj4p68367mA+0 z7d`(Z5k0RJJ#P>_ZxTIk5j}4cJ?|7f9~V8J6g{62J)adlpBFt}5=(St) zIwE?fir&3N?}4KC#iIA`Mejd~-gk=L_lVy2i{1~3-j9ghkBi<bM4z)opX)@Q z2SlHLi9T!JG$o`ecK3gKP&k@<@itKAd_N^lOc9DIT$i7!(KOnLn7TM2=?B_)G^CJ5N zk^Pp)en(^v5!u5;_DGREMr4l{*^@-}7b1JQ$X+C}zZ2O@MfM7j{e#F}Ewa~%?2RIO zcS7{b6#e>(e!md?{v`T6CHjpP{pN{&D@4C?(QlRLw^sDqAo^_<{pv-(!=m32k<(7( zq==j}k<&rsWQm;qA}3$u6o{O`BIj6ff`Bvn7FLG9joXsLB6pz3JwfE2Byvv{xu=NS^F;0iBKJ2U_i~YYmB{_Q$h}VF-Y9baDsulO za{n!Ir-x#|0dD@X3_sv z(f76 z23#iw+#m+rBnI3f2HYkF+$jd!Ck8wv5(AzQ16~jV{v`(dTMT$j40u-z_^%l7KQZ8A zF<`M6uuTj&AoAOZ{A7{eUgRGw@_UHsX_xK#|?AqG~8 zfprNnsFN7fRSY^o3_3#$`jr@Tju>>Q7<8E!bcGo7XEEqDG3X94=x#CSJ~8MqG3Yfh z=xs4*q!=_w4B8|HZ5IU{ML};-&{q@;6a|H%pjZ?fCklQp3QiXVXNrQv*`nY)QE;Is zxLg!mFA8oK1&@e=Pes89Q7}prj1>hFM8P6auu>GP76scx!A?<7B?@+lf<2<3UKIQ& z3fe?rh{9x1m?{d75`}$4;XqM%tSCH96kaJ3g*S-8zlg${Md7WY@D5RUmni&~D128G zzAp;@Ckj6ng`bJSQc*Zn6wVWc3q;|!qVRiBxJ(q5iNf`waHA;PCJJ|o!YWbNBnn$Z z;g6!QO%#PFN)<&vJr9b~MNw~2G(Z#`FN#hOMW>3Q(?!vlqUc;vbb%OGc&8Ygs1k#BiNUp^I7D%> zC~hx`j~2xpMRA5G?kS3Ui{d;{oG*$CMDbuze4;2mMHK%+6rU@KFA&8Si{dLq@ztXE zkD~Y{QGBl`zF!nSB#Ivq#jlIvH%0N=;;+T;iQ*4Laj7UCDvF1T;!&b_vMAmpinohn t(#0`Z;+QkUF=vZoZWYHoCXRVp9P`}&?R+n}-T%WLWB(uie~V*Y_&*P%RxJPk delta 31138 zcmZsg2UwIx_x69QJZnYRr>%%IY082iBBEl!Ua^3UB1MYsA|S}p~( zniw@kV@xq7F}-MtCdNcxjp;FoG10W|T6h!l=KI41*UmZj+;h*IGxMBh_To08|Nl({HBVB^zgO0*KM^icXJ{golgNF`O7piEJwDmhB7QlMCsSxTicU$HMx 
zYLunQGG)24LRqP7QZ_5K$`)m(@}RO;c}Ur(>{lLE4k*WzCzRvL3FUd^jPinVR{21= zpj=cwR6bHJDW59;QLZW1l`oWEl|NNfFI7@osjby^s*kFus@h)lQ$y5FYN#5fhO04Z zS2b3RQI!>LUPE|A1e0927pw3pS)p_bd)vhj8SEy^$ z_39>di@HzUuRg3EP!Fn)sE?|L)W_7r>M`{d^;PvX^>y_P^-c9H^=< z_J#JHc2oOa`&s*4Z>6`^+vtj}>K*hzy|dm$kJ2rAH@%0Rs3+?K^^y80eYBpgPt+&r z)ASrYSFh5m^*Q=neV#sFU!d3M3-u-XQhl|)M&F`u)wk(ReW(72{-}OPe@s8DAJLEM zkL$;D`?LCU`b+w&`kVSY`n&pj`bGVc{;7UVzo~z(|DgY<-_n25@94klf0)~vz0K{+ zK4xFD$!s>aHwT)7%;DxJbG*5`Il-K4?rZL69%vqJ9%G(po@AbE&N1hj^UO2NmFD?o zyLpLuxp|d&wRydHvw54jZemzzbxFy2YD2wy8P?Qhd++e=CZq%*^%Bx#Ax#mI{iv~B zhQaHi@Va}nz^7roaf)@y#?LPvZP9S3hu?AG^_1{>R(PEeUT1~ZYr^X-;dNeky?^wr zbEzid)XZ@o9X)nD9C* zyp9O3qYs!?o7NCxS|_|77hcDNmqBM!s~?ada{w^WNp+>k2L&U^+;Q z=~3ZzN_aip=zpfx@sp+bUI1#q?Q|f*YpKi7|aCyj~YxZ!`klti5nMs`*XRPaePk({0nw z#F*{~ueXKQJB@&Ip(#&HEiRw6;8bQ!LVRL$aJR&S_(fA|;^Vp}{I7~XWv|+Cx7xRp zCD}-fk5zcRE4<#TUG{UQ=H9Yg8+W69J6Vy{Cf5hT>q6~YKX)4KF9$o4ACNoB0dk-m zB)mQpUYCT|W#RSd19FJmNe-36gx6K!^@Z^IPI%q&ge8|e^5sYh936z$M-I->iXw_- zOKsm@wrgFR>>uB=$2Uwv*faSy2^#uP0!l2 z;WN3?Lr^8WzQ0E>M;21AFfM-5$DzS3lGz4Z%2O3OE!8f~3521APJy!@VLCEgWMJGcD4kbD|fB1rzw zvl1T($@e|Jls}d)dty$3PqutTzAEo^`ZP0iQcXfqgx7E9X(eBmKa+15#>$^Nhcq*E ziI;t)b6By-U&>!)rlyT3EhwBfF0X3FZux7<{%L4JeJg+GbTu>hH@_)=?|i12p^yER zeEXiPpM|9T74EwpUB3!R|F^C`<*V`m2Q7tUZj|-MKV&IhLXv%re`mSULTTklb}KD~ zunqs2Y`o=UHK{Jn(~_Lq#l9{PSl3ja5~eV}|*ezuW6qLJUTfXYz$ zx_m_4!9+2j^Ag2}u@Gq;Ioxv{FVXqm?m}Q`5Q@G&(vDY(ZnJm9fgW?9{X| z)m3@WT1gHBdlRQD1D5R*Sh4Z9l$9rd$lBsyk6eUYY(Qaj$ zkYYT5FuUhSQKk#2>%S*dEMJvhX`D}N<9s|p`YUsl63=?Iy*GTBGW#C6Cv5jz)yiCt zYmSiO-O4;6b8oXU1( zhf*h`enJ`|q!B_Ib5Bl#;`T7@Qtnq=Lh3K10YXaIqwH4pC=Uo}ppXU$X|VIT7KUB+ zN0h_=7xaj7RC!!TLxnUk49 zRoM7M-{12_OChB4C+6kwX z8;(A1d>qa1P4z3~M@RBQ%Gb&_%C~Z@a#Q(U`9Vlig_J3zY~eNVPa)+9DNjh#A5v~9 zKPk7BpOs&fJIY-l6$;5Jq*5VO2&q~~^M$m~!`t|XSAK6ggEJh|p8hDdHv6Kfo!YF) zmgOMFcTla{H`ZzQHUxUNXfjXpn0wWByV%uv!P}saaQiFvwLr627K0byn z{*h{w2hiCAur&fKYP66_gfz?9#@EovKVI$O0d)5O${GQQYEL1R3u$%}pszaMUw{;K zppYtsROK}J8anz9Q%8CL!#xah8W~2ZqlGkANb{N)Ca4qtrD2jfSx5_nRO1}(VaQT* 
z{sqWY^MtfWNQ<3zUqgopMUFlXtHtUJ?~C5gsy4MmoyA0XhxcV6IfS%CG=En#cL`~k zkX8t3l_OF}ZAA0Se@9ELY+69^cjctc-R5l=q%3H%FTH19>`XBk1}mb;vHYH6xyRA1 zHuYUuRaZ56SKjlkbAH$4HP&Xo*IHHYYeKBPhp6=+22R|j-tS1>r#jW`>JGI|-Ko~A z4eBl-trgNbA*~nE1|i)iq>Vz_B&5yzh*90@Zes9JcM+yOND!ILfR%Ir;xS_X@`*N zgtSvg^+IacM+9N&8TAGAMfD}Z)R!IE9{ycIx?e~S3S(=BuP|ykzIsY!<;=>eywa*6 z1v9J4Dk?|k%`dBV_EQZn+22*)cO<)8PZ7JkoNo~ zv90=vdexD%yLCb9LecDb(d^9b*2U^I(d-2w?G7EVAh_`ix`+N;{p$Zif76J5Aap=k zFohocUvORhUcKv?{}1Yq>Mivr^|t!6`ipu;NPC6!kdXEXX}^#j7SaJB9Td_d`_y06 z-_+mLKh!@pG=tVmNRJBXgpghm(pe#05Yop&`dk>BJNxT~4$fw};p;Kpn&wD;NNcD0 zXug_BlQl(Cg>*R7;t@Uwadq(su>~lgoEu=F-dciYhh@U}s?rm@Q(LRJ$ zhE#26R%+UWywc*rL1opYRYH2c@gUZQYa=rLTP~y(R_)eCQudFwui>25)P0PQUTo?y zRvVY&Yx=uXXsjXA3aj>Lq2@%NN)=1Eg`+VpBOD)o30fQqZMhz#Aq`c*ZLhH zofFc#ji28W()*1cKFINHKVtBZAr%#66+>s1R25dtEG>4P?O-@-uh!-W>Ab6HGV`?g zS%2$ooW(qC0c8_Y)6xp78gpE$))qD4J?}NAHOF5O)T*_me>=2lZH1?CZ-%QiPaUDH z(bj6~gmh6z9}4NCJ=%sAp706jl2h$y$cx>k?Q|qRpgFbe+77KwNSB55iI6@O(v=6a zdSbL)+WkVh+Bl`_Li)_<8)#6SCpsD?nf7Y?97%5NAtC+8t?d`mHD^$Op=0w$v`3wW z5JRZFt#(9v{2t<%kZ#-)cS3v8k?i`1+EdyKp6q*Cdq#U!drmv8J+GY+(icMdQb=D3 z>1!cz@Mte<2Hyg!$FQmJk4_*dgG)x;Yc!=}U0K*&3N`HeX zD8Bomzb`l1YVT_oJgPnr(hqL!qL6-U)NTJ*`^1s#)-DU_mRtK&NIy-TSz1+CQIt2m z@Sf~z+Gifnbs^n$Yd3`S^Q5%0>WY6NzS6$&AifsTFK+EyA>ElWD{p3L*XiZu9_z9U=WL zq(2_ezV-a-EgLO&8vp<4d@aZjVE5L29m%`3Z*?D!)yQrSun9J4=B&Kp!pgq}Xshdb z`@ePRe!|%7FPE-&)C2w}V0y3~=1AV7hv=R3P+{~EMoAcrdx+5^^vK32v=GKtIlj)~ zV8g<|7(MpyOwi+mvE@DD?s|eJU&6;H>An9x;q^Yk*yg_{yxz~zXOG@rAE2iQV_RYL z5k~p1?rrtK`mp~!xb@-s2x0UV#&*uRA%+P5RDJxv02A~yVe}P7lk-G~A;f=*p7}38 zmYyw)iZH6qTOo!BzkJ>1NZzMU*9-JQy+|+CXXrC^t1#-$xK0Kwavw4JEW(J;%k|lM zg`#^`UBU>1U2;*d7oFa_5&azI1hfIrg&;5dK7sepBE`%}I85wGbAG};&=`pVm z#t^r@N*Ft(kI9=^dT*BN^o@?BJ^FfmgMObdh6-btFoy5ZHxZ%N3S)$4q(RPxP(wlP z4o8k#uM@_|zdGxBgZ`i+`5}Fme!uS0-TH2QkN$u#b{57i!Wbot7GaDQ#u#Dj`jEa? 
ze@Ne_@7KQ859kMlF;*DkgfUqd2MFmGVN4OmfzG@z!$H#%`f>dPt@J1LlloKoDg9|@ zV7OsoizH!;7shVR1>uJNiO)N7_ULEy7xWi}vAZz#5XQuFw9?P&FKb_Cj3_89t(sXh zv#>%K6NL0j<6)@3roZm|F5D1ec*ALmFobvTV6QgSmFsSMW`UMBV z*jpHTd6K1wV*R6S-iBVCn&>{(FX#9wgD0hX-rQ#umR5}}o1RxSv#fNt{)ux_gdwo` z75%F7NQ5EA@E_-=5r&@9XZlx;q}nSt^+EdA`ZvVr-w9)1VeHo!(*BKa5l6mu9r?d5 zX+1ZC+12F*c~yluRTVQUJ9nww<8)ww^2@HgqVZu1>zZ=XP z>FBfD+}Yek7{?1^`d^9D)*Nk){okEojx%=?#tFih=4{)=5aOR`?)5J~Z*w1EoG6Tw zoWr{qg6sp#DgSe7mB87e82dP~n3xu&y7>oYi4u9=|rrq(s z8E;swv>~GTZgYdPU$h~)xy$T!E{ZlprUZa@ zD0s(#cTe!{58fledo*~D2k$iSo(SF<;GG5DIp949yc@v#Vemfa{36;AJM$uV-vsX; z!21?>-v;kH;QbqT{|W7yK|2ZBwS;zUpj|s?7Y^+PLc1(zHxt@9pxseu_ad~r4nBZS z2k_|zKJnnwAAC}rBF3P^OaPxu@W}?BT=1C=K1JZ;0-smF=Q{ZMfNw|e4RoH4F@)Ot zgYP)-odCY+;5!L?r+{xJ_)Y`gJn$_5-`U{12z-}=?@I7p4Zdr^cMJIL0$&&S-UQz} zVDbW!H<*IJ6al7KFeQO01x&-hG#X43z?1`aQy!S6gQ*ZqrC=%t(-|-fZ$cZ2)f!rJ9WRQn|JPhQmAnykG0g(5Cybt8VARh(!4^Y~G;th%qC?-%k zkb}|@6e}omL75Lq4JeC1Spmu_P%eV<87QBF@+Bx=gYpw7KZDvE)M22G05u)dNuW*v zH3QTvP^URlVhv_{C8&EqeG}B*L5l`03$%HlEeCBSXlp=Q2ikq0Z368<&>jWtG0={I zb{w=PL3;_bmqB|2w6{Qe2ekJ=y8zmUpnVS7ub}+~x&(SVu(4C+k1^pE0&wzd!^fREp3;JcyKL`C5=(j=t z1@yb1{|4qUOvXG3%+tYK2<8$nmx8$*%oSjs59S&$F9Ne2%qzjX0nA6i{1li!0Q09{ zz6$2+V7>w7TVTHJd?e1`Z}4%x5@+xa=?v{tp}iH_Z-Vy6q5TKY{x|TGz^{c<>SnOm zFdG6ELx2MUmP5cw2v`jP8z5jK1Z;+Yoe=N< z1nh-?{Sa^f0!~4|GZ63`1UwG`FF>Ff0)rti1_EOtFaZJ+AutI7`$AxU2uy*%5fC^Y ze*$d~I1d6BKwu37E`-3<5V+PE)7@aUyCCoY1U>?Rham7U1RjOJV-R>9f_x#UKLnLR z&@Ko%2|;He=oJWh9fID3pm!kXJOtfJJf{P)z9D>(D@Ky+RLU0`f*F*3l z5PS%N4@2-#2tEeEze9)-LKFzmAfzLN1VTtKgmi+CE{r9_0wFOF5(^=T5RwTYb0K6A zgsgy&RS>ckLe@h_9fZ_F$bJYp03nY+$RP+h3?WA$uqX(NhOl7}HV(ojKv)`tMn@*1|n`l zWOIm=AhIn)wu4Ath?F7H4Y3sK)g)Gdhm8KUljJ?eL`0G8%pF@mKPSlWWc2P`sJG_bS> zi$7Qb!4d+NFt9{|B?>GtV2J}ucd#Uar59L|!O|Zr1Hm!`EW^Pv3M{E$84s3puuKL^ z26imjV95o`bg&eGWhPikz)}X53b0gzWgb{+z_J)D0xZkGvJxz7z_K1J8^KZwmTh3! 
z0hW5O+z*!BV0jQM`@nJlERTZaFjyW3%W<%rw1eeousjErGhlfMEU$p&b+EhzmUCcv z4=fkJ@)1}rgXIcXu7TwSSiS_yH(GME8K`B#7<<(fuGg1)>K-^caX93(>P7 zdI3Z)gy=00-2l<|L-dmn{X9g!0MTDU^p6nz6T~PG(-C3jCKc2z0#& zU9UpdYY^)Pv7I0`3}S~t>^SViPJq}25W5s&mqYA+h&=+ak3;N#AogpB{TAXhhzo?c zV2JAtaf2Xk2*edY+$@MIgSZV4=Y+T&5O*Bno`bmOA?_Q9yA5%_K(~(2EgZT-7Z46yAajxlJPrx(f<575NcaSLw1OTo^iZKk6!hp0J$gWo0_ZUddXzzrP0*tbdelRY zXQ0Pf=|r0tM&0+LQc(iuql9+K`t z(r?f!7yCd`thu)FU zdkplR1ihy~Z+i{&UIx8aK<`7)`vmkp3B9jD?{A>@chILb^iiOX27QvD&k*P{4EmHq zpZUO%eRn|LozVAb==&1%eHr?G34MQrzCS@f zU+Cut{rsU{59rqq`VD}7#n7)D`c*)`TI}?zhkm=D-%HT%E$H_S^t%Q9eusX4LjO+C z-va$(pnod#pA7w{LjOh3e+Bek1^r)#{_jEm4`4ti7+`?`F)*MM2F!&4^I^b?FyKuX z@HV9QLW&=x_}d|6CZtqAN)@C$2Pv;W%4;yt4+eIEfnhMP3ZV=(YZ82A(n zybFVx!ypL;4TC}BV9*2@v>pa+gF)M2&?Ojj9R}Ti!Lcwn2?qCu!Si6SUBKX_F!&4% zejNtC2}9bz5EX{#FeDv@WW$gg7~+N@55tgyFytBx`38o32SX!aXdDcUhoLqYS_MPr zz|bdP=(8~NGz|R}hIzp-BMci2!zRM8$zUJ09frAK*d7>m8HU|}VPC-To-lj>3?B%? zSHbX2FuWFqUxeXTVfZx|5f3B!z=*yuVm^#m0wb2eh^Jx1OEBVP81XBN^n#H_7?}bi zN5aU_F!DYaxg9$r>tN(L82J&5{1`?B!>BGW$^xToFsceh&4E!*z^G?o)M*&?GmHjC zH-pimV01c+o&=+J!RUuz^nMs)gfTua#sp)gz?eK3GabepfH9B5m?vP&A9hG>0jaGZ zwLhc|ht!dfx)V|#fYiN^`VFMshSXnR>_8Yh3dW9svD;wm{V>)IW3R*5Z(;0B7}p=h z4To_fVcc36w*|&+gK-yO+*KHN4aO(H_`WdSbImk#B6Q6-eW|$NVlRCkqLYPzv zlgeSzlQ8LdnDhcnZUK{hVX_R9C&J`uFgX_{-w%`b!Q_Ww@)t1q2bg>dJ5%CdN-vnw z2d31(lw~kw1x$G!ro0AI-hioXVX6jG%`kN;OwEU>1u*qdn0g$hJ_#8TWVC|}U&t5_ z85xj~1sU5R<3Y%H2r~9T#(v0n0y0iO#u+$Rucnvb%gp7-jaS1X$fsCt= zaUC)~hs^ennE;t7kU0o4heGCX$Q%WksgOAtGN(f3G|0?@%mT=q4VjgYIS(=yK;}Zo z6p*Igv^H^^B`nC3Ymu?^CV=x51F4p<`u}i2AMY?^B2hc z6*7N^EFh~nWQ~QaDUekNS;df53R$xus}i#2K-MD2azNHnh{{?4SsNhhC}ceiSr;Me zDr8-QtQ(N^C1m{!S$CYP2OIqDGGqrqb|=UVhwMnm?gH5s$c};RM95Bp>@ko%4zklA zdlF<%h3qWI&VlTF$S#EJ8IWy*>?+8f1KArO+X>k_Ap0m}pMvaXAp1ke{tskdhtg?n zVVVZh%rGqhruBts{b5=LOq&kV3SrtFn064RJqpvlgK59Ow7Zbg3vvcR&S1#Nft(qT zV}%?W}x?7JYx4LJuP=Mdx^ft+KIa{_Xnf}F1)w*};? 
zkgG$kALRN&ZXo1_KyDP|Mni5l$W4IUo{&2ja)(0hD99ZHxnm(W9dai@?o`OlhukvA zoelQfIgmRKau-0Z9dZTaE`!|lkh>ppk3jC@kb4|*PeSf%kozX&z74tOA@_aA3x~X} zkk<$D`a<4d$QufI!y#`J&@_&cvMws3b zria4xXqes=rkB911qo1)0tJJhU@#O6 zfr1H8kPZboP>>G=g-|d9I|Vi`<@-3YJ5`DkwMs1^Q`8J_v>TpzstFz6gb9q40Gmd3veQ34e8greS1)E|mcplC1@je(+>P*ed$RZuh+iWWf8S}588 zMH`{07K*k((G4iN2}Qp{(VtLkgyNP^+y;v6?VwnL;`UJN55<8{91g`Jp|}W&%b|Ea z6xTrUVkj0+ya9?gLU98WyP$Xv6z_%N{ZM=miVs2YDJXshicdrF87O`cieHA}SE2Yz zm|=t&F<_sO1~cZvj9oC}A(*irW;_ftj>3#%VC@aozF-{y)wd5v1nVKN9s#RI=n1es1=eT4dK#=Rfb}d`Uj^$MV0{~` z=fV0uSTBP05?DX6gY_y{uY>h-uzm&BZ^8OKSZ{&#XRzJ{>+fI#Y|X)D1Y0YxwFR3G z*krJ2U~3OHf3O9DEd*>~V2cD>6xd?G76-QOU`qsBFR&$p%~QbG27+w}*oK2`6xdS1 zHXdy0V4DoK46tQ`Ef;Ll!BzyenP4jcTN&6Yz*Y^md0?vn+hVW@uq^}IO0cZ~+j_8V z1Y0fGwt;O2*y_PnH@LrDiH34oGdC<%p<2q@_SCDBk43nlST(gRA8prj9!^n;QVC>abT z!=Pj&l#GFraZr*5C6l0JDwJf|p(F=N@}Z;?oK$24+u! z*;8P)fZ1zc_BxooA3L*;!0g9i_B$~9LzsODX8#TqMyO~B6+uwZ87iWnVklIMg^KY| zF&`?HK*chsI0zNTpyD`GoQH}_P;nV5C8%r%mA+7EfyxA^OoYmGsLY1S96MAlg31+8 zxe6*Dg38CB@(5JE4V4$6@*}AH4XPxlY5`RtP!$DL(NL8RRoPIL162a5)c*I>~bu;_DG^gS&45f*#H zVjUK@hsFJ2@o-o?5*FKFaTP3{1B-XT;)h`IepviEEPf9be*lX;UtrjqgIxl91lVK2 z-VN-YFJ7m4Drc}i0QN_~ehBP$!O{}9G`>ZdvN>+A_c@q z5TikC2jK>>2gLUv?t<`~|4W9!l5wzP0xVetOE$rhT3B)pmV5+DK8B@1u(UHQje@0x zu(TAGmc!D=Vd>Mb^jTQe0+#v0G8vYQgJn};Stcyo3CkXUWqV=Se_+|yu4L9kAj&thfX# zF2l+&SQ!H=V_{`Eteg)kYhdLQu<}`0c^X#vz^eAJsspU@Eb*$Du*wFj4#TRGu<8`7 zZVjsySgpb8@vu4rR%gNLt+09*taicb^RW67tiB9u{9sKdSQ7?oieXJTtf_!C55tNy#Q-Jgms~?E*jQ#g>`daogLN* zSa%uL-GFsp!1_2?-wW3Ff%Qva{W@5`0oK0{>py|@S73t~HUz_lPOza8HY|hj3wK!hPXzUn$%-7w(%6_dN~wy#)8Y3>(d`F&H*>f{oK*V+m|5g^h<{ z<4M?f3O2QcO&V-6!=@bAGy^tSVbdYlbOJV=gw4LN*$*~*E-*Le!DcILE`iMlVe>KA zd>l4^4V!<0%|AnJf2bV}wIiW+C)7RwwR@rVGSuFH+Am;BH`vk}wj{%rd9X#mmZh-e zW!Ul#Y&j2GLt$$)Z0!nL*TPo&7TCHCwtfd&e}S!cVOw9=HWaoEhi#i+TODkxhi#w0 zw$EYPm*9*6XCgS0z&RJ34sb33=V@@h3eMMIdrR1Eg6#@yPlN4QuzebAuY>J-VEcn$ z-+m3Ye*@dUgB{&qM{n4X3_Di9j{9K8CfM;Y?D!0Jd=7P8pe`Qj5}>Xe>gGdT4b;^` z-F~P$0CfkU?h&Xv33aES?kv>33UzNl-P=%i9_rqQx+~bJy9RYPpzcek`v&T6!p>mW 
zISO_zfSvnc=SA506Vx|@dM~JN0rjn+-W%$Dp*{@iW1v13>Jy;8C)D?b`e9H%0_szt zemvBtL;X~!&w~0KsGntrdI!`Cs9yv1PN?4j^}C?n4fPK|{eGxF2=#}c{sh#&3-upE z{U=a=73!};{cWhf1NFZ`{U6W(G_-;S9U9t0Ll87{f`)Kt=nM_9&=3y|_8!oX1P%S6 zVJI}%pkXC6Y=VY5XsCyV`=MbsG#rA4!_e>yG@OQp7og!RG`tE8Z$QJ_(C{%ddm^(wf=f@=b}CW31UxH7>t4P1HP zDgak8xUArs1+H>%Rf1~{xaNavA-L?|S^}=+;93Q)wcy$Su1(cG_iE*H4= zV8^u=T>HUw5L}1Abp%|;z;yy#Pl4+haGeI%3*b5nE|1VR!1Xq`&V%cHa9sq~C2)NL zuB+g>4zAC^^%c0j1=siBx&^ME!F3m0zk?faH@Ab^2<}$kZVPT7aLeG1Uf@m!cYkmX1osed4+r-saHoQMJh;=rJsI2?;LZkj zF1YQ}!CeIIncyw~cNw@Vz+DaQdEl-A_hN7ha4!S*N^q|M_j+(|1a~dCw}E>Hxa+}v zKe%^;`$2H;1NQ-NKML-{;C>w3$H9FP+)so1Ik3CWfcqtIzXI;p!TlDv&w=|ra9;rT zN8r8;?knKF2JRdDzn1Phz6W;e)@>ct zcm29*>#DYnYPE#Dr-)2JML`r2_MTagkdTBV%={iaKEM0Vd7OJ+=RD4z_q^^s=NPh+ zA+-#tXGkMMni;Z>AqN@KMoBwLQYcBKq$4GrDd|EC8tyJOG?h6gdP;7hPPbm2>CG#lxfs)0PY@=is zCAAFgz|ahac4g@43_YKr7c%rNhCay9KQi=h4E-lV-)HDVhR$H^z2Dz_33u>?wx*nPD>-_6@_nW7r;s?Pu6QhM&OjGZ}ss!|!1D{S1GQ z;qNj06Ndkn;qw{3nBhwpet;3ljA+k@BmB>ZQyFnOBd%k_t&F&x5r1LCtBiP^5hEBe zkr9&_v6K;O7_p9#DU9sQ$P7lF%*eADc`hUGX5>SRe3+3RF!FOo4q@b6Mt;x8MU1Rq zWE~^-NEp?XQN0`jAl{GipAg7BgxIqpBFShf$4; z&SG?bMh{^0d5pe{(N{3~0Y*R0=qDNd9-}{D^nV#Wo6+AT82vq?%Nf0!(e;e!!k8?^ z^kK}Yj5(Jv=QHL3#yrlLCmHhrV?Jlh5XQ`B%wonYVN5e)+87&*?ZMbw#`a_EwT%5O zV{c>Zi;R7pv2QRoF_y8@7(1P@OBuU{vFjL@#JDuZb!OZ^#vRAF6Bu_XLI4J zXId)L`Z2ATX}@6F4NSY8X?HN~uS|P`Y5!o_RHn^l+FYh>W!g@r)$m0+U-aaQEWY?9 zUtGi&7nAtnLB4o`FP`FyulQmCUwq5-bf))YdKS}fW%}=#ejn4PFnt!&=P>;cU$*DV zRKC24FYn;XyZCZEUry)C8O&(UjC5vnVa9FDxR)7!V8(x$F+#$OQOu}hMm;kc`08Z7 zI-9S~<*VoT>Sexqm9G}@RT*EcU}hg?4q)aWX5P!pKQZ%BW=>+}SInHn%vNRvvyzx~ zKC>=o)|Je9n^_++>tkk>CYZI3SsR$$o!QyU&SCb=%)X1+_b_`Xv&S)e0<)`_y@%P2 z%sGQO7cl1{=Df^UpTXCE;p=8t3k1m^D`F~64ibu2iY1?RKiLKggk1^;5f2P|l0!9f-r z=9^#f&DDH!4c~mkH$(VlDBtYjn$w(@f}KM&;R0)D=qpC9ArC-`|HKhNOjnf$zurR`Xn%+iZl`fCzPuVv{c zEFH$u5iD(B=>e7=V%a$?yM$$zvFufrz0I?f90v#gnA ztt{Kevi+2Hp!6t8@1yhqO8-ddBa}Waq4X(ApP}@5N?)Y(B}!kR^mR)APU%~ezDw!* zlzu?z$CQ3b>F1P|P&%B_QIw9QbONQ5DV;{?mz2(=bPlCoQ~C|13n^Vh=@LqpQd*Xv 
zbS0&0C|ytKCQ7$bx`Wb6N_SFPOWEa=T}9b7lwD8RO_be2*=>~FLD}7u-Amd1ls!b* zpD25bvL`9~GiA?F_5x*prR-(OUZd;{$`Wr<_6}w5QTA`jKBDXs%08oP2xY@48%fz1 z%EnVRiL$AbO{eTD%4SnGkFo`neMi|3l>JEA&yaLoShcwz6tFt14Jk#p>f(eKxDlW%UKDzKGRtvif6If5Pg|Sp7d%AHnWr zbqlNav-%)wQdrZ0HCM9c7S`O#n%}YJPS!lYnul03g*Ee7lbFw%Z&|aDHA`7j%Gxfh z?aSI+*7j%Z0M;JM+T&UK2i88x+NW9jENh=po-MNY;&JT@CB@vF-rt4znKXk79jC z)}P1vt62YQ)?df^8(4ok>+fLwFxF3I{Z!UZXZ;M;&u9HNtl!Uu4s19|!iF?9bY?>j zHuPe{?QD3E4S!_ABW!q#4bQRRFKn2?hIwq5&xUW=u#gQ)*-*;H4s7hs#$(vni;cb6 zn9s(6Y`m3?_p|XqHa^V8N7(ob8=p(CaV{HwWaCe4T*k&SHm+ymMmBY4Qy!c8vuPlk z3fOc4n@(cWAKCOQo1SOWi){KUo8Dm4KiE{xrUo`Ov8jbk``Fx$&B<&&i_Mp@`3g2C zu4eN!Y`%rfx3YOCn)UMo zgsuN&>;KqV!q%~D9naQ7Y&)85X>3bpTNk!vv8@l=e#f?l*!D2n9%b9(YNX39$?2G+3^TF9%si>?0A#*O3K$zzMk?;lvh%|lk!^1>nU%fqB9kzP;nuNiifCpl8P6oc$JFx zsQ4EZ|E1!8R1BqJ1QnyHm_o%DRLr1a78P@;_@0VoRBWVTGZj@-?4n{f6?>>?qT&#h zs7#`=J(WjM*_F!P5-JB!c_Ni3Q+YO(=TUhfl^0Wa8I{*jc_WoKQ+X?uzoYUGR6a@N zi&XxD%D1TefXa`l{FKVisVt##ER_?eoJ{33D!-)iYbqB}Sw`iN{~wiGsoY6rEtU0D zHd5J4WgAuPs7j$Km8yME+Pq3U|7ZldZA zs_v)iDXRWV)nBQ4nW`iCU)38_y-C%-srrbjPpJBgsv%U3p=v5sUsJV!svoKPnW|E% zR#3H?sx4G)r>cUgYN~3e?m+diRG&rl@2P&6>ZhrGnd(=mew*rllBoU{)gMy*AF78^ zJ%Z}dRF9*2BGogfo=^49R4=1?E!7*S-c0p2s>`X~P4ynCo2YJ~dOtfe*?AT_FK6fD z?0lY`e`Du+?EIXaL)bY+!p`yRoW#zl?3~WddF))k&hOay13Q0YX9c@Dv8y+`E@ao0 z?7Ep`t*xSV z54COVKAPP<*xifW{n$N#-39C}X7{n|K8@XHviodypU3VC+5J0qzsm0S**%Nh-?F=u z-Rs$1#qOQ#Ze({eyZ5pCAiLYBOQo)(gu2eub)l|1b=lMnr0zKC&Z6!d>Mo(~a_X+4 z?i%W@r|vfD?x5~&>h7iPe(D~h?m6mSq3)m5y-(ez)O}7}33bD%8%5n@>ZVcmC3Qy% z*y`p`w~)Fr>NZeUP2Dc)ny71`Za;O0IMR8s{wV6xs86T9EA^Sw_oaRi^~Y0xHudLH ze<}4>P=7V`*HV82^|w=hC-wJGe;@S^P@j05`sb;Ch5CO||33AfQvW&iCDae6eiZeS zsh>vum(BLE~98{))y+X}p5Qt7*J8 zLE~>}`~!`T(|AOGiN?Rv_!f=t()d1&AJF(YjU_Y=r*RaGV`*GSV=avbXzEYXV4BXL z=^~o0rRjQ_ZlmcAn(n6QUYhQw=`os~q$%-dnx3QS1)5%?={=f0rD-%xV`-X3)0Z^O zq-hRKU(>XRrX@5jrKyajl{D3}w+DL%u=gtV-o)O!+4~TCpJDHF?0uQNud(+H_P)v9 zcS!90h`pb%_cQhmVec^Zj%V+e?48fvpV_;Ny=&RKfxVmAyN$i&?A^`YJ?w2_Zwq_( 
z)0{+e2F<-`E}*%H=96eXmF6>OK8xmaX}*-^DQnpVIs}%_TGsr+F03lWCqw^EWgvqq&Uc^)zpyc`MC3Xs)EWp5{iH zn`z#cp!pyzDYT^1(uC0xc)gavCjX(sDK}7t?YXEmzX=Yg(?O$ zrlp0J{j?mS6|G6MrqP;7Yd>1^Y5fJQ$J2Tet*6p@2CWy+`YT#5rS%G0ucq}kwBAYU z!z5ZCq4gPBpQrUjT3@2|6MT5zfi*dYuJAs`)_3bZ`gk)`|oD|ZuTDm zAL0NGBypfU2ae*v01h0>f#W%F5(iG@z$F~GlLPl~;64sKz=4N2(856+Jc@$_96XML zS99?99DFRn!6!KQGzXvM;9od+WbYFW4&&em4vyyFI1bL?;MW}dhJy<^_ydQoA zx|>74=g=QG^dN^G=Fp=YdX_`abLd45y~LsSIP`B0eZ-+pICP|d{m?oNZRb!uhZ;Dv zmqV=_I>4dB9M0l!e-01eZ~=#lIeZp}&*kt19R3xDFX8ZV4sYOaHHUX`_%LljTQY4O zXgiv=PPApv){VAfNVN5$Z7^-e(RLzjr_gpfZNH@L9NNyO?IPMPq3s6RZldiL+HR%o zUE2Oj+vl{E&^C;=wX{{xRz=$`+G>S3%D|B2*RMDptrk^Fa&{FX?5S0sNdl0Om2pNZrlB6+$gGk;lk`IX#BIPKN(nF;56)6Km%84T7WRY^3NI6raoGnt$ zONf-)M9Kpqn%n~WHMan#pvOuJK zCsN8p%1V*4TBPg{DHS56TBOv7lsb{pAX3^y`w;DuMf(n-ed1`*zLRL*TeKe_+Mg)e zpDfy+A=;lM+Mg@hUnJUJBHCXr+W$_pe_FKvM6@pv?I()%lSTU(qWvtyz6&;dAhaA!2bkX5_(cwbT;YyL{@N3cGI?>@q(cxy%;a<_<52C}LM2E*j zhbKjcKZ_18iViP{4zGv~uZs?oM2Afx^=OgWO{8Xu)ZQYsK%^Fl)DuMN$s+YMk$R>` zJzu0=BvLOCsh5k?>qY8K5|MhRNPSqOJ|a?|7OBsQ)W3+-gh+i&q`o0i-xR6uh}4fn z>gOVLlt>*bI(8Htdy0-(qT?XZu}F0Mh3I&^=y;Ooc&g}lhUj>)=y;jvc!lVAwdi=Q z=y<*8cw<6zd`@)yTy$I{IyQ=q2Sr+nNb4lhjuC0SL|Q+QRw&Yr6=@fUw2MUARU+-z zBJDbnc7sT}OQhW+(jFISPl>c=MB4Kr?M0FHl1O_+q`fZEJ`iaii?mNgTHDMCVFq>%iby|Nq<0eO86y1{k)9*c z3q<-UBKCcMvzlijNNPk77 zzb?}MF4Er@>7R=9(IS1UNS`Fqr;7CHB7K%fpDWVmi}deB`ZAHeR-{*m^eT~FE7I#l zdZS2hJ>vL)NIxtxI*5!6k&!JjazsYH$QUFtibTe7BI87naf-+|M`TQnC#=|1xQIYY4$aq?0JS#H(A~IeS883;9S474?MaKIg;{%cLk;q645g9{8#yXKv zDKe@>MvchWEizg}#y-*I1kvRj(d9hR$xJ)^?cFwI??q;(e-B0^;XgKccSZE zqU-NP*FT7^4~nkOi>@z_~ou5XI2Z;P(~6kW%OuCqkf&7$i8(XB{y zyHa$!UvwKSx-AgheiYq)65W=GZp$U2+ZNGno9Nakx;2Y#`$V^cqFbBj-cEE+5#3Wo z_l}}_AJM(9=$fWBJ=km^D&Y6gvfkGWIiu4Ulf@yi_F(V<{KjOJ(2mj$Q&Xvr-;liMCJ^UnV2Oq z=ZehvBJ*33`Mtf(V;+VstM<>xEL-gn-dK@Eq^b$S#h#om2(W9T}af0Y^lIU@&=y8VV z@k`O;Y|-O9(c?nV;{nm*AEL)+qQ?T!W3%X)BzhK!o+pW(r-+`Xik|0+p68367l@v} z7Co;OJ?|7f?-4!k6FnagJ^v_rJ|cQPE_yyC5j|fKJzo<&-w-|D6g}S&J>L^O|1Elc 
zBzn#jJ-3No9Yn7}(d&BA>j}~8KcZKO=rvaKnjw1461~0^y_SkzD@3o&qSsc@Yp3W{ zD|$7HtfNI%7m+nUWF0TEu1Scj+eOwLBI|LH^_0kZMr1uNvR)KfFNv&IMAqvf>+d4# zEs^yRP_G6 z=>4qdop@LD{#f+>RP_F@=siO8o+^6J6urL|y%&kzOGNLVMDJCi_ZrcAt?0c?^ez{@ zD@5-)(YrzPZWMipKJ7%GqeP!H(I;K>=_>m45`FrJK7B=>JkjSY(dT-R=<|f=^H{7dxtQ1tnq=rdIG86oI+D@4v!BIg>BbG^v9N#xuja_$y6 z_lTTFMNZ;zk@J+uc}CRbJzC^;5xIRu zZm!7fFLDQp+(MB%Smd4|a(^ju&l0(3i`>ga?v*0pMQ)wQZ4kM8MQ*FeJs|Q@B_i)=k=Ie=rHQ=$BJX&SccREU#gVqo zk@riHcaF%rMC4s2@~#ni*NeQHMBXhT?>>=tzsP%708z}mnCit`r5o5e2^$1-FZWJ4M0WqTpUp@Mlr*k|=mX6#Pph z3dW0qnWA8}D3~V-7KnoHM8Wr>V23E!BMKTtL9-~>CkhUVg2SS)t0>G7g?XYdUla}! zg+-$97ozY~QFyv2JX;i=CkihVh1ZC}>qOyiMB#5m;q9XEZiy(oR}|ha3ZE8*2~qf_ zD12KKzAp+t5QQI$!p}tE5K%Zx6iyO_Ux~tnqVNY%xJVQ(7KN)t;aX9+K@@Hlh1*17 zxhUK%3hPDTUQyU83J-{)R8e%aC_0i@6m=CvnW8956lIH|Tv1dcicS(m=ZT^VMA0Rp z=yFkXl_d;!8#G?V|WjQGAamzE2cCAc`Ln#ZQXjr$zA#qWG_(_+?T2 znkarp6u&2m|1F9?6U9SB@i0+5UKCFf#ZyJ`98tVj6fY6QOGR;+DBdNCYsG!Vb)xu4 zbbCedVKJB(oFoRf7lYHq;4{SFbHw2L#o))p;19*%PsQK`V( .999) tri_scale = 0.7; - else if (r > 0.90) tri_scale = 0.1; - else tri_scale = 0.05; - + //load_png_as_texture(texture_filename); - glm::vec3 pos (randFloat() * WORLD_SIZE, - randFloat() * WORLD_SIZE, - randFloat() * WORLD_SIZE); - glm::vec3 verts[3]; - verts[j].x = pos.x + randFloat() * tri_scale - tri_scale/2.f; - verts[j].y = pos.y + randFloat() * tri_scale - tri_scale/2.f; - verts[j].z = pos.z + randFloat() * tri_scale - tri_scale/2.f; - tris.vertices[i*3] = verts[j].x; - tris.vertices[i*3 + 1] = verts[j].y; - tris.vertices[i*3 + 2] = verts[j].z; - - // reuse pos for the normal - //glm::normalize((pos += glm::cross(verts[1] - verts[0], verts[2] - verts[0]))); - //tris.normals[i*3] = pos.x; - //tris.normals[i*3+1] = pos.y; - //tris.normals[i*3+2] = pos.z; - - // Moving - white - //tris.colors[i*3] = 1.0; tris.colors[i*3+1] = 1.0; tris.colors[i*3+2] = 1.0; - tris.vel[i*3] = (randFloat() - 0.5)*VEL_SCALE; - tris.vel[i*3+1] = (randFloat() - 0.5)*VEL_SCALE; - tris.vel[i*3+2] = (randFloat() - 0.5)*VEL_SCALE; - 
- } - if (serial_on) { // Call readsensors for a while to get stable initial values on sensors @@ -365,52 +324,6 @@ const float SCALE_X = 2.f; const float SCALE_Y = 1.f; -void update_tris() -{ - int i, j; - float field_val[3]; - float field_contrib[3]; - for (i = 0; i < NUM_TRIS; i++) - { - // Update position - tris.vertices[i*3+0] += tris.vel[i*3]; - tris.vertices[i*3+1] += tris.vel[i*3+1]; - tris.vertices[i*3+2] += tris.vel[i*3+2]; - - // Add a little gravity - //tris.vel[i*3+1] -= 0.0001; - - const float DRAG = 0.99; - // Drag: Decay velocity - tris.vel[i*3] *= DRAG; - tris.vel[i*3+1] *= DRAG; - tris.vel[i*3+2] *= DRAG; - - // Read and add velocity from field - field_value(field_val, &tris.vertices[i*3]); - tris.vel[i*3] += field_val[0]; - tris.vel[i*3+1] += field_val[1]; - tris.vel[i*3+2] += field_val[2]; - - // Add a tiny bit of energy back to the field - const float FIELD_COUPLE = 0.0000001; - field_contrib[0] = tris.vel[i*3]*FIELD_COUPLE; - field_contrib[1] = tris.vel[i*3+1]*FIELD_COUPLE; - field_contrib[2] = tris.vel[i*3+2]*FIELD_COUPLE; - field_add(field_contrib, &tris.vertices[i*3]); - - - // bounce at edge of world - for (j=0; j < 3; j++) { - if ((tris.vertices[i*3+j] > WORLD_SIZE) || (tris.vertices[i*3+j] < 0.0)) { - tris.vertices[i*3+j] = min(WORLD_SIZE, tris.vertices[i*3+j]); - tris.vertices[i*3+j] = max(0.f, tris.vertices[i*3+j]); - tris.vel[i*3 + j]*= -1.0; - } - } - } -} - void reset_sensors() { // @@ -443,7 +356,7 @@ void update_pos(float frametime) float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; // Update avatar head position based on measured gyro rates - const float HEAD_ROTATION_SCALE = 0.10; + const float HEAD_ROTATION_SCALE = 0.20; const float HEAD_LEAN_SCALE = 0.02; if (head_mirror) { myHead.addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); @@ -567,8 +480,7 @@ void update_pos(float frametime) void display(void) { - int i; - + glEnable (GL_DEPTH_TEST); glEnable(GL_LIGHTING); glEnable(GL_LINE_SMOOTH); @@ -600,36 
+512,8 @@ void display(void) load_png_as_texture(texture_filename); - //glActiveTexture(GL_TEXTURE0); - glEnable( GL_TEXTURE_2D ); - - //glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE); - glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); - glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, particle_attenuation_quadratic ); - - float maxSize = 0.0f; - glGetFloatv( GL_POINT_SIZE_MAX_ARB, &maxSize ); - glPointSize( maxSize ); - glPointParameterfARB( GL_POINT_SIZE_MAX_ARB, maxSize ); - glPointParameterfARB( GL_POINT_SIZE_MIN_ARB, 0.001f ); - - glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE ); - glEnable( GL_POINT_SPRITE_ARB ); - if (!display_head) { - glBegin( GL_POINTS ); - { - for (i = 0; i < NUM_TRIS; i++) - { - glVertex3f(tris.vertices[i*3], - tris.vertices[i*3+1], - tris.vertices[i*3+2]); - } - } - glEnd(); - } glDisable( GL_POINT_SPRITE_ARB ); glDisable( GL_TEXTURE_2D ); - if (!display_head) cloud.render(); // Show field vectors if (display_field) field_render(); @@ -802,7 +686,6 @@ void idle(void) // Simulation update_pos(1.f/FPS); if (simulate_on) { - update_tris(); field_simulate(1.f/FPS); myHead.simulate(1.f/FPS); myHand.simulate(1.f/FPS); From c91dd5c4c2e6ea2960592465239b900d69ee1d00 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Mon, 19 Nov 2012 15:20:24 -0800 Subject: [PATCH 012/136] More little housekeeping cleanups to main.cpp --- head.cpp | 119 +++++ .../UserInterfaceState.xcuserstate | Bin 101209 -> 101683 bytes main.cpp | 29 +- tga.h | 470 ------------------ 4 files changed, 123 insertions(+), 495 deletions(-) delete mode 100644 tga.h diff --git a/head.cpp b/head.cpp index f2132e788d..98cbf0d0d3 100644 --- a/head.cpp +++ b/head.cpp @@ -62,6 +62,125 @@ void readSensors() } +/* +void update_pos(float frametime) +// Using serial data, update avatar/render position and angles +{ + float measured_pitch_rate = adc_channels[0] - avg_adc_channels[0]; + float measured_yaw_rate = adc_channels[1] - avg_adc_channels[1]; 
+ float measured_lateral_accel = adc_channels[3] - avg_adc_channels[3]; + float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; + + // Update avatar head position based on measured gyro rates + const float HEAD_ROTATION_SCALE = 0.20; + const float HEAD_LEAN_SCALE = 0.02; + if (head_mirror) { + myHead.addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); + myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + myHead.addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + } else { + myHead.addYaw(measured_yaw_rate * -HEAD_ROTATION_SCALE * frametime); + myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + myHead.addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + } + // Decay avatar head back toward zero + //pitch *= (1.f - 5.0*frametime); + //yaw *= (1.f - 7.0*frametime); + + // Update head_mouse model + const float MIN_MOUSE_RATE = 30.0; + const float MOUSE_SENSITIVITY = 0.1; + if (powf(measured_yaw_rate*measured_yaw_rate + + measured_pitch_rate*measured_pitch_rate, 0.5) > MIN_MOUSE_RATE) + { + head_mouse_x -= measured_yaw_rate*MOUSE_SENSITIVITY; + head_mouse_y += measured_pitch_rate*MOUSE_SENSITIVITY*(float)HEIGHT/(float)WIDTH; + } + head_mouse_x = max(head_mouse_x, 0); + head_mouse_x = min(head_mouse_x, WIDTH); + head_mouse_y = max(head_mouse_y, 0); + head_mouse_y = min(head_mouse_y, HEIGHT); + + // Update render direction (pitch/yaw) based on measured gyro rates + const int MIN_YAW_RATE = 300; + const float YAW_SENSITIVITY = 0.03; + const int MIN_PITCH_RATE = 300; + const float PITCH_SENSITIVITY = 0.04; + + if (fabs(measured_yaw_rate) > MIN_YAW_RATE) + { + if (measured_yaw_rate > 0) + render_yaw_rate -= (measured_yaw_rate - MIN_YAW_RATE) * YAW_SENSITIVITY * frametime; + else + render_yaw_rate -= (measured_yaw_rate + MIN_YAW_RATE) * YAW_SENSITIVITY * frametime; + } + if 
(fabs(measured_pitch_rate) > MIN_PITCH_RATE) + { + if (measured_pitch_rate > 0) + render_pitch_rate += (measured_pitch_rate - MIN_PITCH_RATE) * PITCH_SENSITIVITY * frametime; + else + render_pitch_rate += (measured_pitch_rate + MIN_PITCH_RATE) * PITCH_SENSITIVITY * frametime; + } + render_yaw += render_yaw_rate; + render_pitch += render_pitch_rate; + + // Decay render_pitch toward zero because we never look constantly up/down + render_pitch *= (1.f - 2.0*frametime); + + // Decay angular rates toward zero + render_pitch_rate *= (1.f - 5.0*frametime); + render_yaw_rate *= (1.f - 7.0*frametime); + + // Update slide left/right based on accelerometer reading + const int MIN_LATERAL_ACCEL = 20; + const float LATERAL_SENSITIVITY = 0.001; + if (fabs(measured_lateral_accel) > MIN_LATERAL_ACCEL) + { + if (measured_lateral_accel > 0) + lateral_vel += (measured_lateral_accel - MIN_LATERAL_ACCEL) * LATERAL_SENSITIVITY * frametime; + else + lateral_vel += (measured_lateral_accel + MIN_LATERAL_ACCEL) * LATERAL_SENSITIVITY * frametime; + } + + //slide += lateral_vel; + lateral_vel *= (1.f - 4.0*frametime); + + // Update fwd/back based on accelerometer reading + const int MIN_FWD_ACCEL = 20; + const float FWD_SENSITIVITY = 0.001; + + if (fabs(measured_fwd_accel) > MIN_FWD_ACCEL) + { + if (measured_fwd_accel > 0) + fwd_vel += (measured_fwd_accel - MIN_FWD_ACCEL) * FWD_SENSITIVITY * frametime; + else + fwd_vel += (measured_fwd_accel + MIN_FWD_ACCEL) * FWD_SENSITIVITY * frametime; + + } + // Decrease forward velocity + fwd_vel *= (1.f - 4.0*frametime); + + // Update forward vector based on pitch and yaw + fwd_vec[0] = -sinf(render_yaw*PI/180); + fwd_vec[1] = sinf(render_pitch*PI/180); + fwd_vec[2] = cosf(render_yaw*PI/180); + + // Advance location forward + location[0] += fwd_vec[0]*fwd_vel; + location[1] += fwd_vec[1]*fwd_vel; + location[2] += fwd_vec[2]*fwd_vel; + + // Slide location sideways + location[0] += fwd_vec[2]*-lateral_vel; + location[2] += fwd_vec[0]*lateral_vel; + + // 
Update head and manipulator objects with object with current location + myHead.setPos(glm::vec3(location[0], location[1], location[2])); + balls.updateHand(myHead.getPos() + myHand.getPos(), glm::vec3(0,0,0), myHand.getRadius()); +} +*/ + + void Head::addLean(float x, float z) { // Add Body lean as impulse leanSideways += x; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index a232612957bea21e610cb0ad66d91fb97d090527..b653b6168a2bc179a65c76fe4b742f86bf36cb02 100644 GIT binary patch delta 30848 zcmZsj2Y6J~_V#}RoS~-8X@nFK(jb$B6bK;+y_e8JO(8vjnS?+RdYReu-U1AS1Tzy5 zL@bDkqKJqff{K6z8+NgvA_(e5;d>@r#OwD@d*-b5o_D=_ueJ8s=gd6$V4Lw!jd7#a z^iBP(O}?EBRR{b1+|st&w9oX2=`qverl(AYOovU+nO-yv}J~Dl7`oi?J=?BwQ(>2rYroUu^Y?Rx`-m)h9%I)PKIaH36qvZs-yWB(W zCl8c|%cJD+@+3J&E|90n)8qC3 z{IvX%d`x~>J}#e@FUS|=cjR~F_vH8GOY#@;m-1Kg*YeNuHTf6$SEZREDMqEa(n4ve zcqrbAk780ZrK1w1bXF3SE=rQpUFo6pRQf9YltId1Wt1{n8Ka~tW0fo=TbZonD7IW> ziZWHPC|0FXsZwSt3zbERO|dI0l~u}WWwTPPY*C!b9%Y|$KzTwrq&%w}QI0Cdl~W)YbajSWp;oF@>TGqs zxV}NHR?{aR;^QasSm03s#A5T4eD-nullI^xOz~1T0N{DS6@+2$YtuQ>f7ph z^@4g)eMkLJ{Yd>({Y?Ev{a*c3{Y$;0-qmbq2Ca$KRBNWS(pqainn`P``D*R8AT3yn z(4w{OTC$d+_0#%mL$wjwL~W9mp=D}$T9H<)m1-5*JZ-+VKwGLU)0S)NwGG-vZMU{Z zdsy46?b9C7_G^!72ehZOL)!D&5$#p&q;^VsO*^Z7qFvTL)jrccw`pH!Uus`zUu!>W z*R)&Of3!QgL2sr@dP}{H?yW0&kRGge&_nc4y`vtbN9!qis@_BIsrS-*>jU*cdYV2$ zAFEH&^YnbZK)33pdYL{$pRF&@m+H&(<@$PkgT7JUuDkSo`s4aR{b~KM{+#}zeq2AP zzdo+o-=nu-b9J`I4(}u(r3k6FkR}T$M@TCU89j?GYd8P;^V;F8`ALa!-J^oyV-ph= zPMRMVTfOA!Hs6e2FAa?UHEVvCgl=(BL2*g3aSJES?;6`J_E7wfA2&5-+^GGoW!po_ zFPR?gw%b!5HLWwPH*GL&G;K0%HdULpn6{d>nH<$C?gXX@sr93#9i|%7PE)O^&a}() zkg49}G`UO-Lh=xjw~%xp`3WgVNWnr16H+H3brw>bkh)%^CAs90Z+e(QdpptWcgK8# zA*J~}+oRP{ciUC}bth)%llQ!T+R5>bpA$W&puDgsy0p9`xpU{lxWuG{xY&Ww1N#pc z7~Q37|Agr7efso^9uV6#F0Oy~egoqZV;_WhruwD3?W#B2jcIYDky4`i+&aO~$MBZJY%sLjddLvd;^IAftF7M|`n0%o@1Tt% 
z-e5@4ThSWL^oi+Hd#clPSxBBv(`Q2Ra*UP?Awj0E>^b#rG51hcOy9Xt-wMgcY5HDB zrbbkI(@*vsr|D-Q$?x*L=@-*=T5^>_uWZvz(=F4Z4zDJL4vHcqO*H$@MOvEvF#X4L z+c47fr(-}9L#KGvTe^S+tLcvEZsv&eA=3(r=8Vd#n&Oh7{GWzKl-xva>ZosGXxB_` zCQFW&ni!I8atpcD-}2?wLTY<~mbB&@+0&lml)Z%H`!8j(Y`SH7(oPE@wQCgi=RbtW zZH1(Id;C3a*-s9zr#fYSA^AJyKp_R>c>AUol@=9L6&3a`FL2~Y2F=#N9V$dff&UH_ zVY+2{rZH6e#!z?u87j&hs?~!~o#nU(Q|wN}16NnM+utrZNk|>;4>H9a*_JA#kbj5n zZMtPT))+d}9a?Jg&(H(hp_@P8A1n`jz&}h#;rIDRxcNs4DdOMs>84wzR~zX&3Y(k1 z*+2Owy7`R{_%r3~2mF(T6nURN&&{7NB=f)di%qvouQ&2XHOhCB$rjTerrUC4k2~>H@~~AFE#Qv zmbLT`HkZ8HJ(>m~b$7~pgp}+UXf$;2+b=(6Pj$(U$_Ip$Dx@AR`EmIPA@vkeFUJz2 zA-}`3@)3J#gM3(iPJUiUeT39kNc|e*7vvXPtrAjyA!R$7H#bxaI$=-QEx#(Cluya8 z32A_kh6`zwkj6e3&l&kGH`7`94f#zW4HVKKAq{Sj&&hAg=Y=#xNJE7*%<*z_!!Fy0 z^5y>v`lsfB3&huF5~kKM85H zkj4lpJ*3yQdn5$}g*73lv2>ct)8!jNiaGpM3xlOe>Mi*%d+NjTS^0PQ5BW{`w)|(K z{Bc4WFQmajnjoZ!56gE9ih^P=CGN#H&Fg*CPlc4(Se1&W;&rb8i(HB)@;a_4 z_7o>x_NIRiRcWgP*;Dr_zDhgA&$LkqPy&_qLdq3VzK{wbMVL9G5%{YO9oc<`O&A=PnKLRC?b#p6Z76b)Q}R z>BZKLm7WHZ$AEh->)*tUuFpLUvN}{rvnRQfVajkJl?iE@OBtbzbfkG1I{S@RCb?Y` zl!-#B5K^T}$xt$dR3)UD4lgf5C%=59$PFlP17Pgj6l0EkfEVq-{cS2x+^Jb_l6vAJNKrOQrD8lr}(VQRP

|X|3F&bmJt3qg zg>+CzPYLOeke(LOGy7Dt8l^_7F=}TuR*h5Rh4ieDUKY|@LOLg;&xG`~kgf@1Yez55 z(9Y3BGh9vUp$@R8?p1rLz0}@nAGNRAPwg+H!$NvaNY4xDh>%_o(u+bmx|dL*)xm^v zRvkvP-McZFFWJ5Ai9$Lini+DuLmYX!L351O4ew1IZ%?VeKXsGrDGxTFYL;5)j+m`Z zR&&%`HBZe~3xsrBNUsR#gpghp(n%qm64GmX2`9Q$i8@7{s#;X5J=-18J+Y^S^tzDF z3h51Z%wS)G=6JZR;f8G{Ee#{oSy>~}$K*{bDe6}~b6S;<&NQyg>Kt`$#(%dO=|xp8 zbspvaIQBMNQ16|-P)KjyJHw{hbG&{2-eolIY150U8dRY!b}#P#b~)O%GbHCNS2x;I zA5mARE7eu%YITjeR$ZsAS2qahZ6Tc((gh)16w*6FdRIvA3F&Ovd`{_~lsE7V`sC(3B-3RyURa2i=kJ?ik)FbK(>Wf19 zTu5IC>B|Q7rRMJ32f*VJFs>*@_5{UW66Lb@TOUxjp2 zNVkOa+g|l1(duvN??e-=-WJmDjpF_g#wP9;%?!fWa`KS=100_P7_OQ$qt?QneM`i^rutv7SdmnrcSFWswmDYDEeEarf8ZQqzdVdQ`3cX zcYJ#J%!+>^{51EiiRLeiIJH1wG)yeZn>sC~VEXh?_i5T_AzIi24dKGr^kVw`dpfO? zW`3X}N*LW+_Nm1qf0By8^+TaJeh6v+>`yg$&mi9mS(MD-w z?WqmgXl;y^E{tu2(L)$L8;I7%YZDqL-AfpKa=aZSL52l^*;>xuiPv(4(ffW(O)Jm} z-Q^KBdWvTGd$rcA!YKcD$5Z8Xf?tZD2(leF-RDLg|UM$h6rP*Fox|TTB{?JaIIc*YA&szaWcZ~ z-u7%^94?HLg)v7MOC04L410YZ*Pe7I`w3x;aB2sIv7;j*#1P;A8SSv!{H!o`a%#^B zW8~Phys6V34EjawWqVSCc2s*wJ0^@~VT=;S=mzaL;o1pdjB!UA>Zl7b6z0Be&v9yJ zgt7DelQr#4?Ol87UhOUIoc6YMUb~=O)ZP)sSYeD4#&}^&5XLUT*i{%4_iFEH?`xMd zf6ZU}Nc&hAy9r~GF!mQl8h-{0;}Az)sNsOmH`*2LTUu)0Y2RxDdk;MH=6VY|!q`_B z`?!m#m=eA9HV;Gh4vkB#?xB0;c+36AkL_PxFte;^TGjCKg1oA!<a2IQr|!{X z^*B9VPtdyvW129I5XO=MNbyeSHd_(7{}Tt3*&f4%Z`RIt^4Zz z?Ws<^pD?C7iaQ#jS`X2OxeY^wah#*BqakGYD1FS|O{b>|KKXyUMbFW5g>jNFW;oh(G6ee-=~Mm%n5tWZF-sV;9fLa=+S|(Y>Ho8m z=#~1+2O4GxWA1$obKM%|>GOp#PZ$dyXjr8GtEKhD`VwKx7si4~L;4T6w-XHMJ+Jb11g>-1fQ5xT=LQm=Pxi8PF~O%=xJ!dPMFUG497&>QsK?sgzt7|Vsx za<3We+4lzcjlSQW)1W`9AJ88YMyoKE3S-&d4ROBJDB zg@zICU%g&`*^YZm6~Z{HpDkQ zqn~y3jxvNdb?I+7dPW&io1W9(b}WoCMA*#W5eFVU!D9e;q=Cmo@W=p<9Pr2kj{@+R z0v;CdC^}z~el4Tm+AI!Q*}K_y9aU0*_C?<5TeX96Y`RkFUYw3V7TB zPr$PY*gTtoXLImu1)d(@=?$JTcxvG33!eVqnE;-nz|#Vrv%zyccpeAOi{N0H1;2GZK8pflmhb_as4DhK0p9SEv2z<_i&t>rW41B%-pRd5@NAUR>Ox?iLA4~(mG#E^_pO3S zfaysvJq4zx!SpPcj)Cbo$ZbIO1=$be0Fc{*+!5qRkmrEB4CECcuL5}u$Xh_(2J#h< ze+Bs#$iIX9A5fZtB7rg(Y|1!LCV-L;N+Br4piBY90!k?;vq6~)%6w23f?@+jfN~I& 
zk3nq>YBH!6P?v*R4eC}jYX1Xepre0Ie@*{XrWD+Az@4KpP3#M9}g;n-1D+ z&}^Ws1Z@{+dq6wxHk<+N4baYkb{@1ZK)VXsEzp~Q-VAgj=q*5R1$rjvg`m#>y%O|! zpf3P@5$JZ%SAf0>^fjQb1AQy#wV)pd{dLg42K^_{uYqp60s2j7+XUJ+gSI}p2yL^V?Hp+P5VSoBZNCQJR^TgxuL{1A z;2Q5s`zCFQr5cm!O-#qZOfNv@IE(hNY;JXQY9|PZK z!S^}v{Sf(XU2|yX4edL5m=$27($OXb%MKgP{Eo^a2DOg`i^)@FfWT5Q0C34o#p#Aaoc29jwq{ z4Rm-CIy?^@UVsj7K!>-W!`sl|0(5u>I=lxRE_UWTv}5Oxy6PD9uk2)hhnS0L;=2>S`bu0hy!2)hg61_(Do zcnb(`1>s&0?gQa6ga<=-7aN54fbgLZJ{-bFLwGubkAv_G2+xA>$q-%w;Y%QVErhR! z@J$e24dJ^X{9y>+2jTl6`~ZaChKObm;R6veM6`nle~1W#h#-iFfQU{IVTK4>G(>cT zhzy9xhloWGu>vAiLBv{!SPv09A)*c<_Cv%0hxeu88gLx2`hk`i`%%i}Z zjt%p8Fi!$=7MOFuoDb$AFi!!q70lDXJOj*CV4e-;d0<`$W;>XdfO$EXSAlsgm^XlV zGnlu6c{`YQf_WF1onYP#=DlFv59Y_f{3MtU*}(iPn4bspi(ozm=2yUc63nN;d=|`a zf%!a`-vRUcVEz!ypMd!@FnTFh|(a+7oz+jsy#$?fT%Et>IhM0h>C%zIEd;3QQaUa8KQbXRBwpt2T=nd zY6wIPhp3SdH3p(GAu8J)0HRhx)M|)wLDZuV^%z9G22pQ=E$RY9{SMIvh;9ne!4Mq@ z(NPdR7@|i*bUH+rL-ZVoo(It`h<+5JAA{((Ao_iX{s5wXgBT#D3B*K0Od`Z2K}_cg>_f%p~>?*s8N z#79DW0>pQP_|Xub0r6Q7zZBxvLHq`YKMe74P)zGCHx@?6mhoQ^M(B&2A@&$DH0lNGMU3KUh1YJ8o*CgoL2fFryt`*RA zK6G6OT_1w3`=IN7==uh9eHXgE4_&W8*FT`^ZAc7-#ArzD42gpvaTGQZ$3Ws#NUVUw zDo9)pi4I8I0g1;U@eCxs0f}Ej;!lux4Z8V2w|3CYAG-C1ZbP8kFz8ka-DX0!+0bnh zbgO}Gwb1Q3=yn{soq%pvpqrcHIwX18AW4TLUr0)Zq<)Yz0FufeX%-~Sfux;~v>TEh zhNM%FbPkfvL(*O7-U7O}g6=WUy&H7z4&5g~_k8GH2;BvAUk%;YLieN4{SXhg1b@sR5AM9#RKE>L^GZ1F5qi z)efnPA$1?59)#3Gkoqp9UWU}qphtV?(Ghw?LXWA?qXK$VL62R~V=wf01bUo>9`8Vp z_n^n`(9-}tn?lb(=otY$J7J?|Kj=9edX9jeGoj}q=xK+ZPUyKGdLDqD??BH_py#L1 zs|oaK4ZS>|S1k1E4!u&KR~Gatf?g%iYYp_;0=>3DuMeTum(c5L=p75ayF>33=)C}X zFS9}K70~-4^!^xnUxq$`&?f@=bb>y!p^qKiw{|(T86ZAh1{Xc~MAHx7&7|;O*gu;M)7+{3~Wia3g81NhnI06H%!+_f`;4c{1 z6$bW%fxThiTo||*1}=qxufo7L!8Y(53~C93OfX1+K^ZWp00tGopod}56ENr?47v(~ zZo;77VDJDKJOT!fg2CHh@Ix@z34^b|;A=4WIt=LzLx#YRVK8I`4A}@nHp7s!FytK= z@*WIriH)Hq7^=X~kuY>349$R{bujc{7`hLJeg;FogP}jbuoxKD4Tg1xVRK>FVi>j* zhP?{I-h^T2V7LUsJz=;v44(wU^I>=)3_k$FpMl|rAx(j_07z?ZgS28uD~GfhkoG*J 
zy#i^k!iY97LWL1Jj7WnK<6*=^81V>7+V5kr^DC^82dPkJq%-?hp~5HTyq%L62?t}arrQ=5XK#cac5xM z8!$c$#>c?;SQuXk;}^jAMKJy~82>hmzW@_dm=FjPf?z@cOen?1glRD0F_`cyOn43^ z{00+&iA`W)Pnb9eCJup#t6<`0n79Qdz6}#UfQcW$q;@bV1SW;Sq!O4k9VS)4q!(e* zNtpB+WXO==4;g`wkpmf1Aj1L~58EK)5M(?98P7t-VaRwDGEPCpdC0g38Sg^I`;hSg zWPA)6S0Lki$oLU5u0h5P$hZZW;gH!EGDkw@XviE3nd2dI5@cpUW)WnTKxQdqmP2L* zWZGWUhqF)sVRXGB-iy7RaoF%>9sg05T6k<{`*@1~QL8=8KSd3^Gqc=2wvU zBV_&znb#rnCS(~Qt0iQ$hAdCW@`0>u$SQ`cO30cCwyXt^WrHjMSxX^n4P>o{tWA)$ z1+r=(>o{bcg{&)(bq%tvLzX+h?~pA)c5}#9AX|g%wvgQpvi%`D5VC_HI~uZMAv*!G zyFzw1$nFl=DcH#F57`4DI}@@eLv|iy7eaOkWLqG+46>&~b|qxbg6z4Fy%@5WLUt`= z?}6;SkbNAo&p`GYko_%W{{q=JU~)T{90HTWU~*rWJPanM!Q?40d4>%pSHk3jF!=~f zei0`B1vy5@X#qKdAZHZhjDego$e9H>b0B9fgmS3>SO$lUmhFomx6|0U#Kh5VnOpfwaIP@q9U926u&K`Io? zgMuYcunY=LLBTmFI1h#1Q0NPVeo&YQg}tD#4;1!=!jVuo8VbiiVLBAra-lFE3d^8y zIuuqy;VdYe3xx}ya0L{uhQf7FxDg7gp>P`%o`S+(peP86MnaJdigrTLJ}BA`MUO$z zlTdUBik^j{Gf;F9ir$5y525H2DEbU+ML$B(&rtL$6#WK8|AC^rP;7wWW>9Q`;xH(V zf#MV>?g7Ppp?Clk4~F72C>{yLW1u(_if2Qy9f}u2@iHi03B^00xE6|cL9r8xcSG?_ zD855$N?JpS2b3s|^MehtEgVWZK}i&pBtXeTD5-*yMNqN=N>)M1S}557CACnp3rZe^ zlEQ;mK|WJ1yY3pnO0=(!n|&tdqc+1=bv}=7Y5etW&^h1?x1h z&H!r_SZ9NE9#|KG)ehDrU|kN@RbX8U)(v3Y4A!k+-452BVBG~)Cs=obbuU==gY_}6 zJ_$DKA+SCR*5|?cB3O@s^%bz51nX(Ao(1b$U_B4kcfk5SSU&{oCt&>ytY3ol8?b%{ z)~jIs8LZdAdK0X_gY`C8??9;mN}EAxb0}>Er5;e~jg3+nN;N3;g;IYgZ4ad#pfn6h zJ3^@$N@Jij4obT~X*VcMhSDBT+8avyLFqs!9Rj7pp>!maj)Br~P&yGxGof@cl;%Nc zA(WOtsRc^Qpme$oN-Lps7L?9~(gjfJwk?LzWl*{jO4mT?7AV~YrO!d>aVR|jr5{1* zS5W#5lmTTep{zBO#X(szl%+yh29y;*SrL>8C|eC>YoTl(lpTaJ+aW0X2+F>KvTtBo zbC~80(@Zce7N&KFX(=$R2&PSgY13iaA(-|eOnV8YeGk)az_goC9tq_MP~H{FCqj81 zlovqx8Ytfa<=deA2$Y|I@{>^hE7;2ag7UjCJshTYhUsxIeHct13)9EL^qDYy5lpwk z^oL;jKA65Ark{rC=VAIqn0^OlG=~{2VMa8}NQ4&Fyk=H zco}BAf{huU!i;ZW#`jRs8Y&d1(4Zn3DiWb02`a`yMK)CAK*b8E*a#Jyq2dTsoPdgx zQ1LlbxH+yur3{t+P#Fl7-J!BCRQ89;e5kZSWf@eifyym5sN4pX&p_o%Q28=cUWUpm zQ28BHwSp=cs#K`z3{^=`l?+w+P-TUxGN{@BRokJe2C9xg)d{FN2~|Hs)$dUCADHP4 zGksyEAIuyIGt*(_IGAah4KwX9b1}@^4Kp8unNPsX(=hWq%)AIQ|Abi*%xVs^+QY1l 
zFe?&f^?_MKVb*Y%RSdJrVb%@=7?5@t_@*%dIm3TAJF**jqNPMCcRW}k-HXJGa>F#Bhi{R_-7!5lxB6997t zz?=~)*5iGm~3-7`r11$1`MWL`L92TX)qVce3 zA}pE(br&W0X83SKa988-0#%)09!Ax6@aZ2Y}3H@ z6xdz>+flH$0J{&^Ww4J0dp6i}!2T%Mp9cH0V7~?SyC4i8MuM0KA_K(3Af5nm5X4VDUq+*a?e2gT>#$;vZm1G%QJkB}uTP5|%80C5vFmE3o7& zEO`@_s<1Q=mIlGn$*{BpmQIDGkHFHWVCmDa^cPtAA6WV)EK7!E{b1PuShm6j%QnKY z&9LkYEV~HH-i75Juv~-XZDILnSe^mPvtW5GEZ+mm_rmheVfpv4{3@)7hZQNXq6e&4 z1S?j+idC@URao&RtT+cN17Kx1tn3IYr@_kEu+lacRvv?ur(xw8Sk)F*1;eTkSd{~- zrobu-tZIN&2Vm9Xuf^Bb1gtT_8ZTJm z18YXZnhaQz1#4=-wq_5k*$Znvfi>U2nr~rk9IQ=-wW+XnDXd)wYd65!tFZPato;qv z^@MeUVBHW{w+YtOz`9yk_Z6)B5!U?->%(Au46KiZ^#azfhV^S<{Y6;+F|5CgjSU@P zLmX^KfDH>^!!p>g0ydn54e!8)_h4ffY>a`8v9NIlY@7!h7r@42u<2LUVB0>}wjZ|r2-|MKw%@^#0*?OR z7zmDK;8+iijo>&3j!WS95VmWuy*+FXhV6N<-2&T7Vf#MVeh{`Fg6-E~`)#mo{|k2X zf*pfl$57a@1$NZIj)!2!53u7`*l`PLxZepY6cc{4yHFscV4D6f)J6FQa!?5!T z)Ha1$Z>TjvtqQemq1F#-1EDq^YEz)L2h{e3+5u2I7;48s?F6XJg4!IY&4=0&sI@?C z8Pv{)+VxPoVItIShuS?*yBBH?K?l*?L1=Kd2iBb!kvH7V73g-Bzf32y1x^qx>0qWj`x=T>^5!79Vx*wqKC#d@c>f91;Lfvmr_Xq5154%!eS3c}o z3ASDPVAq?l>pj?Y3F@<puQODr$T)x)R#kj1=P=k`Z-WPALUThWEz~~*^)9I21NHl${!yrZ92@lqq5f&8KMeIpp#CV-zYO&! 
zp#BupzYg_pK>azWzX0{`LcLq)M^Jwm>OY72ub}=4)PE24KSKRAsJ{XAx1jzHsQ(k{ z?}D=lI3;kl0B37(y5H_{`hZgbrw-0`;0yp~5I94?84k`);EV!iXK==Yvnx20z?lNh zp5W{Q&i>#W1kR!0OatdAaHfNEJUAzTGYgzK;LHbS5jdxS(+W=ah159%oK@hQ4bFMs zTnJ7(IG2EPIXG8=b1gVGfO9iAw}Nv!ICp|`7dV~Z+zrmX;M@<+$H4g{I1hpIS#UlN z&KJRX44kik^CUP=gYzudoNs~iJUHJ0=lkIN5S*WY^D}UM3C?f8`5ic~g7ar^UI*t* zaQ+U?+u*zdE(5rlfvY*VT7k<0T;AaR?*lFkT)yD)2UmM=bpTfwxH^K%j15-|xZ=Ro z1zg?0l?<*P;OY&oe&8Ait|8zW4z7{l8UwCz;F<`oOmIyGS01rHUI2d+!t zx(2R4z;zoMRA>lWys=Pu;SGQM2KmxmeLmchSb@Ro9YZ=X4p^r24NrryO(9sMX%g`Sgx`Lsr7~06tLlTA_W>_x6iWpYH zu(KF;A;T_W*c}XekYNuo>=T9!X4p`MEn(PdhOK2-Gs6xuJQ!Zc@M9Q$9K$bW_|*)* zmf?>w{8@$%V)$naAI|WR3}2LB_%epCV0Z(=4>G)!5q%g@$cSP_oWqEV7;yPbdD&!`s}HHJ}B7&VPi z<&0X#s11y2Wpo=xw`24`MxVgwlNfz1iP5((`c_6i!|0b7{R*Q8GkOf8$1!?2qc<>m z6Qh4-ObTPt7&Cw|M>FPF#{7vfe__mRjCq+cZ!_jy#!O_)OvcP+Obuh27}LVoY{nKa zw!eh2=P~vY#$LwQdl~yEV;^Vi2*yri>}1BSW9)XuRx+*|<8m37&$#m$cNya@XWS!< zOEB&qj2pqYiHw`fxJ`_!X53E3r!u|+<1-k4BIAFPVEkE(zl-q?G5!(8|Bvw_7(a^f zKQev=<5w{unD8qmv}eNcOgNnhXENbdCfvh>`=J(nDir)YMHc;Ne7tRo5}r|T*&0#Gx?7sCSSqi$C&&NCO^mI z1x#MbTs*tbl;HwAu>LI?G%U9p?)elTLf+<~?lEsu;nQ{+P?qkXfrp#x` zcTDZV)ZR?(!_+&O`gf*2%+!$*rcPq&S4?eUYAe$)?JTBU$h3=?_A=AnX4<<CZ5IHq#d{{d;DlF{2|hGMRA+Gp=FApP2CpGX^tbXo4A= zm{HA)oy^Q>*lDVsym(9Ea=JjXZUzv9w^B!Q{c;-!K z-b@nn_A>8h<{jee^Z5D_zP^mFKjiDr`T9$~3BLIi-?ZnO%lYQdd~*Ze4B?xxd^4VJ z_A@_;`6h41g;`+N9)Hs3Gc`|nv4Ecz9T z+Oy~i7G2Mx8(H)*i@s#h5EkuXQ6r0*`Qbc%xP%`r-NU#nN^xJ%XiwVCj`C zy_%)(vh)*{e#+80EM3UbMJ!#+(xoh2!_svuZJ{iMvNX!tP}Y{Ru9S77>>5p*zef3+l)p{+ zzkbQTPx(ib|A+F=DF1@;A(RiNd=%wlDW5?3WXh*fK7;bvl+UAlKIIE2Uqtz0%9m2U zjPjL~uc3TB<(nw4p?nYJ^^`YIevtB3mYqyu*?BBGpJf-a>>`$3#jaRMt&V#U3zc#;)Qv*KA+3}VG=tayVJ3s|w7 z6)Rb>h862rv7Hr_tSn~biL5-Cm8Y@t3|79#%6D1$9xFdcu<|2TZe?XHE9+U=$jW9` zox!RLS#=SsE@jmpS@jXChO%lnt46VE46B-1oy_V~R<~vKuUOrg)m>TrCszNB)pxM^ zZdTvR>PK1qIIHKfIgBB7!0Jt`>BE{*)*Q*2V_0(>YffX$8LWAjHG^35 zJZoNJ%`2>Vhc*9V&05w}v1SKrYFM*}H7%^!&)TC|dpc{+WbIk3J%_~Fi&%RJYd>J^ 
zVAc+0?FiP6V(nzsPGRi<*0o_>JJz*lT?f{6V_gr{UCg@cSa&_^Zeranth*la-9_!|_?mN~kVO<&P+p@l^g!SE6-;?#dSl^HJ16Y3->mOtN6Rdxl^$FI$%=%YZ zKbQ53S-*t!<*Z-M`VFk##D=bH$Y(Jcrn3- zm)Y!g3Y%YJ^S{{qZ#Gx6xrxm!Y(Bu| zgKSA9v84@Le#e$8*m4zHu4T)0Y`K*!e`U)jY#Gj$k!%^mmT_#E#+DguJ%X*>+1it> zz1iA_t%YnYX6xN-eVnaNvh^9ZKFiiu*!mh%t?QORGi*4_-?IX7RhiyaHHk@sv*fy4J6WF$xZH;VC zVtX;$k7N69*#29#Uyxw?)oj0(?YFS~Hn!i+_Pf}AFWVnw`x9(`n(fcB{W-RO%Jy&B zzKrcZQ;|YN2P(Q!kxxYd6$7a_nu_D7IDv|jsW^*@bE!C=iVLZ@n2N;JRNP9%eN;R} z#WPgAM8&IAyg|i3sd$%)PpJ4W6`xZvn2KRkjHhBY6$`0YLB%R6HdC>Uib^VWP*Fog z6BYZY_=RjMl|f}MD$gcSc^Q?DQ#pvr*Qk7#%Fn3$oXU|@j-hfqm6NEPLgid4zoGIw zD!-@lM=DoPxsl3URPLs-nacfC9;EUxRY_DGK~)E;GO6l9RX3_~B~%qrbsSZ{q3SHE zE~M&WsxG7I3aYNA>L#lGLe*cXx`V2_sd|{IK~%j?)rVAlOx2fE4W()XRimjIN7Xc{ zW>PhWs;{Z~ma3nqT1D071Xa7Ks-f12mFjs^|3vjts@G7xp6X3hZ>73|>OEA~Q{6~)3)KhM(Ul#ivEu@E zJjjkG+3`F(-e$+U?D$39+3{a?e9n%+>=?$5@$8tyjw$Sz&W>5^_>LV**s-1+8`)9C zj-Bk-&5k;DG_d0!I}WomiJhtJY|GBR?7V=T*Rb#yv(pIuM0E5WXp+4UN`-elL? z?D`kG{==@%*!2awhOlcmyXLcN7rXW+sL7+In406MIfI%Dskw-ntEjn_nmBQ>{B za~C!DQu6>c4^i_dHP2G>1~uwncX+C z`xbWJ#_rqM{Q$ckV)vu$euCXkv-<;f&mpmUF}s`C-O8T!?CHjy0`~N0&r$3-mOZ~_ z&q?e#l|ARM=XdP6fISzn=Ti3E#h!QA^BH>b4RYSXA~M{Rp* zf0_Ta-Kou{Hiz0gYKy5oj@r|x{XMl8QhNopS5x~ZYOkmECTj1X_HJtLqxL~+AEx#h zYG0!EZE8QI_A_dSQ#*>x-MO`22@~JDK?s)1>r|tsk{y^Q8)Llc}b=2KJ-Obe9N!>lv-A~=$ zsY^Wai=*yk>fWaAQ|dmWZa8(Ls2fY&1nMSJH=DY7)Xk@E0dUL9COWiM| zf2OXL`k+3A`gH0usP9aD7WF--&!_%K67?rie-8EMQGW^bf296O>aU^xI_htw{%_RZ zN&P+4-%tH()K8#(F7?&a*Hhoh-XqwX&fYBc_F!)>_V!_KK6^{pdn9|0Vej$mJ(0bC zl(6@4_P)g4vFx47-udiZ%-*%^UC-VM_ExiZ7kl@xx1PNR*n5ZuG$hlIMngv$vT5i? 
z!?83RPs8an{Dy|JX*iFD-_!6%8m^?_8XB%k&~O6{f1}|ce!2E*c%6nfX?UN8k7)Q0 z4WH5Q1r4KU7)!$h8Ya^)m4;<9r$y(71@k#Wb#@aSe^@Y1~BPRvLHF z*hJ%Dn%dLUfu?RW^`xmcO}RAnrKyyrqi8yoMANTnI*F#=(sUtBSJHF~O}El?4^8*e z^mm#bq3Lm&2GR5aO)t~*8clD~^gc~r(lnN)nKaF&X#q`(Xj)9uQks_0w4SC-G;O7+ zf~IN-P4zS#pgEc544OO9oK15M&3QBz&^&ndZ}IK9}Z;X}+4~TWS6) z&G*s#Ak7cc{20wo()>KlFVXxe&2P~BPntiZ`HKY2qiLQ_^Guq*rFkLEKhXRW&1E#N zrFjF*n`z!gb0y8SG#{iTjh4=|bfqPSmONStXc<6DF)hc@asn+U({dUuXVP*$EmzQT zBQ3ui)3n@6%cHbBLCe#$JWI=Sw7gEsTeQ4G%X_qZK+FGV89~bwTBgzRH7(!LvXGV^ zX!(hjRkW<7WdkjnY1u|g4J}RVOJZLN`_kE$L1JHL_GPiJ2mA8b*N=UL>?>j4k?cE_ zeW$bUH|#r$eHXItBKECeUnTph*|&>*yV=*wzJ2U}iv6#!|26i%$^L(`|3mhF%>LEv zuV8-_`*%v%U&H<;_P220IS%}j1MhI)Jq~=pfsZ-xDF>!=;2REn%YlU)_<;lKIncy` zeH{3i1FigwpI_(a_xSl!exAk8^Z9ujKkw&Y8VB2Q@CXk6qW>Pus8k7)f5t)J2Q1+7D99Zu^gTF25lf!4{iPUZdNMxIn}oMEpaE9yh1CjKxNcvPHO%zGriKL|> zsazy26GNFFJY$B5)9B6*rfo+*;&h~(uWd8J5RC6d>QiC0(QxiIihS%JCxQM3HieNV!C$TqaWfC{nHzDc6XU>qN@! zBIPcTa*s%PLZm$POGA+|NTj?VQeGA*?}?Q6MaoAaV zsf8l7SfrMS)FVaeF(UPNk@{PadX7jvPo!QhQm>SV)N4fQbt3f!k$SU8y;G##EmH3j zsSk?Omqh9-BK1{~`i4mTr$~KQq<$t+Cy3O^B6X@roi0+p5vf0j)SpCZnMhqLQmaL3 ztw?PaX~`ljRiw2QX-9~(4hfN#Dbfatv{OXdX(H`Rk#?3yJ6EKgFVZd(X_t$%t3=wh zBJEz0cE3n_K%_k+(jFCQPl&XaMB1w&?RAm%p-B5!qa*9HYK9Xk)q8pqRqLY&Gn+qAkpR((dOTx%?F~*e?^jAlgh8 zZKjGgyF}YG(YB9hTPWHVi?+v!wkL?TCyTbHiMD5owto<9FBWaD6m72&ZLbq;ZxC&7 z6K!u7ZSN9o?-gy|7j3^4ZRnPe~N<_QvqFuIVmm}KciFSpeU5RLS zq-b}HXm^Tece-fzJJIe6(e5hI?t0PgCeiLMqTOFbyL&{t`$fCIi*}EQb_vn$MbYjp z(e52_#9nd4VbMNB`wpUgrfA?R$#$y+!+6(SD$4f3#?StZ4sh(f%aS{uI&v zG|~QM(f&ozew=8(RWV z{;5d+Or(Dy(g%z5F(Q4ONUsvn9r$DyL*DA94O z=r}=ioFY0-7aeDbj^Bumi$up|qT_neafj%*OLVLg9UDZ)W{K$dv*_3=GD2ju5g8ps zMmLd>D>4R(jH5)x@gn0yk#UO1I8$VtB{I$x85fF-D@4ZiBI8bxakt2LKx8~5G9DEf zPl=3#$QUFtUK1Jr5*hy!8DEHuVIpJXFUc_?W1`6TN@PqE8FNI&LXlA>GB%2g%_5^h zWK@fcT_U4SWHgA3W|47NWVRKVT}5U$k=aXR_7R!+B6EPqEEbsqMdk@2^GuO>zR0{l zWZoz;ZxNY^+eGH=BJ(bhd9TQPKx957G9MM0Pl(K?Mdqs_^L3H=mdJctWPT(vKM|SB zMCN9Zxm9FVh|DUHStm00icUqM(+Q%}NutxKqSNW3(|MxP`J&U)qSLFQ)9WJ9=`GRe 
zZPDo?(diS>X}RdMMReLGI#r5J)uL0q=+q!Omx#_MiO#2p&KHW#7mLo9iOyGu&R2`h ze-fRq7oBeso&O>_KO{OoDmp(QIzKHsKPx&95}jWVonI22UzLc?gGJ{_qVsCexlwfK zE4rL3y4)sORMM_ zqHBui+EsMzCc5?zU3-bHeMHxM(Y2rGIv^ps7K^TDi>_CRuD6J;?})C0Mc3t`YnABQ zB)YbWtRqBLy2#29S)D~zmdNTMvhqb%fygQnS*0TDD3NuV$T~x0oh`D?6Is6(Sr?0} z%S6@{BI|mQb*IQm+%2*m5Lpk2tVc!GQ@?;BYmmr#O=Nu_vOW@7BSqF2ku_dqO%hpC zMAmeXHA`g86&MbEXO=Qh!^QuM49 z*~udNS0Xz{WEY9-KZxvWMfP|r8%q{vQ;5!vHK_9T%#MP!$Y>@6a@N%U$bdUY4QjuySn5xs5_y&e|5 zUKhRI6TLnVy*?DZJ{P^lie6udUNc3nZ$+<#qSyDLSGnl5T=ZHYdTkWFwuoL^MXz0= z*B+7RRV#WO6unwS?^Mydt>}G(=-ol|?kakB7rlFm-n~WdlSJ=JMDP1V@25oXXGHIp zMDJHc?>9v6e~RAkir$}y-v1T7KNr0Ri{8UT@9CoVR?)j&CXQk+~M)X-P`fL(?wu(NDA~!^CvdB#nx$Q)5 zdy(5wppq|kA}>YcwGnw8L|(SY>n-wf zMP6T#*I(oniM$g;-bo_wWRZ7@$orkh`@P7!RODSQ@~#qj*NVKGBqHxGBJZyv?+%go zmdKkb@-~RPog%MBZ;Jf)MgEr} ze~8E*De}jN{P80HE0I4<cuPFGtD0o&B3=##eh=SKe!CRu>9Z~S1DELGad@2h5Ckhse zf(lV^Nc2k+{o0CtM~HqMM89m&ueaz|D*7EI`W-9!{aW-pN%T8a^gBZ$`u$e)yHxbM zT=ct2^t)E{`?KhGqv&^w=y#jw_n_$ahUoX9=r>yQ`(E^`68#!P|Bj-6f#`pt=zpH* zf34{Mu;~Ao=>MeX|BUGW57GZQ(SNe&KU?&lEBY@G{TC%f|HY#JQqg~n=)Ya`uM+)t zivA5^KqoPvn;39{7;vT-aF!Twt{8B!7;u>waD^CfgBWm^7;ujmaK9MvcQN28G2j_7 zV6qr6TMSq(2CNYSYQ=zkqOh+hEEI)_%SGW$qVN_`c$X-=R}?-V3Lg@MkBY))Md5Rz z@I_JhiYRi{gEv_>d?eN|HrMTTya^DCrNB>D0xGaB!-HTnWAKlD48cp=8KXAqGYouO%|mcMQN5O%@?I5qVz~n zdWz?-Hf=iqZ!}=|iIQX;J#DD1A

dPoc$AO@C-fmexvH;RETh=H$)f#bx$uf@OxV&LNc_p>e~Nw@zSbu0US!2h=x Hxa9u;O))LD delta 30476 zcmaI62UJv7`~CkJ^^TP`w+uy)jxwNtf+%3`U9eK5NYR-Q5JAN@bL_pLjv|aa z@V4O{!-s~C44)dlG<;|H(eSI`55r%wB)5^hWgl6V1LTf!xEv*$YxlEoVSITqc1+raUCa;k<$(!XZHhGs^FYlHc&B5~n06iApb}x00r$D;dgQWr#9V8Kw+Z#w(dhmNG%HSaO zrY5L~YLePr?O{`s)fBatny!vf$ExGh@oJ`;rA|;Ms*}{IYMwexEl^9;QgxBKSY4vp zRJ$tFrRp+uxw=AKsjg8ssGHR~b%(l3ZBqBD`{YsTe)S>sg!-_0QazgMloqYU zX$e|yElo?;GPHr(P;HntMw_7JYI)i;tyC-1W@we#Y^_RLsjbphYiqQ%+B$8$wn5vd zZPT`EyR`;wzji=7s2$RdYG<{lv~$|i+B4d-+H>0T+Ij6w?JeznoA$AGN&8&8s(q<_ zt6kH6*8VbL)EQeCTN+y#+Zfv#jYfZCfHBY*Wb9}RH%1t{7-NkI#$;oLv5&E@aj0>a zakz1`G0Ql`SYRwP78%V(i_vPVHZCzPH?B2qFm5*18MhgC8XJu+~g_I(sG$BnDQnrwmp3uBYKJov#%?oX}Y-M$m+rR%CYy}gqzX!|lNtM2^vTe?8U zex0ssn^*2&+BA=MT+`_~x4x79O~-njF0fAhtFzHlL(TA>;eC6$%Wy$R9bASFgyiLz z-9i@`a?zgeGJGN=?|1peaLMo)ZTXI3pFG1ChAW1>juS0(;fjxt{6wppZ`0QBmEmi{ zH@dNgZyjH^&~+K->npv58H?e2!w)%QvWAxzm&_kmSUtmSxJJc4JMN%P{s6`StaGwqbjBsR?n;`Z!-LB_{CAwQrFvNxMBGHp4MAJlHZ~&?YV0B)1&sb zkd%L^m0K9D7!KKKBP6w1?5%%@m0JtR*H?enPMX|K?qE-M$?b(?bje;q^3V4T$SSdx zOs_5}9#k>iVQZmNZL*&|-z6J_6!7m{n&FD!f#zI+&AI;b&s;&CTg}fc$>EJaKl%IZ{(5kD2G$hDS5s^ zit=so7H#D*@>qG?ZAyn_S`%UMi%xe+ie( zhAW2i&AQ{8b$cfMu5aZEPhC3POIjsY-(#;4Qn$NxneU;m6;i^#=@%QW7+$ewd90S$ z%-`mp{7XIjUibJ{$gA%0d#3+hXq~*l6Iw5%9xi#Kkdht0=yl;ib@C2-x?A2RZx>Qe zA*H%yhrClry@b@;k=R;S6n>xFWKVCBow7@I3n^Vl8A9sQB=42?wOb>kzCxPfIN4fP zHS~x*ZLfS(en5UuJ|?7oLK-ThkwO}KPr(WK5f9VD@=5uWkopU0fRF|@$*1K<~g3!Ab`dCQOk8EwLGq>pXseIL*zF&S={!G3spOU|juQY2MC#3O0 z8YrYpA!Y5Cztk!6*YY5fZp$T#m8vcxUlpd#PbcS1#RVQpw(b!bxW7gEtFZSCm?lom=$rIlfmqE}igZG@B~q&y+zi&i855K^I#rVFX$ zfYMHBuXIqn6mP{x@fA{;kgP(QEuQD?OD|A(@3_ zap-(?U4r^513Z9!N`E2E5>mNa8K?{rQiYHz?*K+BV?2OS9zazyV5~AuNOOc#?ReBz z7alZ8nffn4wvr>Hxk8%fIP0tH7*wE4_W%k#47JS+#Y%~g76@r!Gef3DDgT#-3Z+s= zi-oi#qTlk(%{GhZx2m$aad*->C^gFbe+{cu76{2MB;knn(*}iJ;yCS?UE6cn- z@jC1EfwGE;hV4RHCZrXj1X9Ji+*R1PVJmHU+=%2DM3ma8b_=ONNR2|;bC75vm3Ni*lncuHL@FQJ^E~|b3CSs> z{X*Z~?kn_~BUaU2v?*7Wuk7irc2h))vo7UpA-Vp$8kO(tX>R3vFla0qQ_?kUCgMXN2^akRBJ(6GD1YNN0ug)B&Q2Z&#*{P)Dkx 
z)Y0}lPa01p&k5;iAw4Uk=N{P-pzEN^oJ?EY7*3tJNBHF7fJowU&5w zVRM<@64Kj3dbhd#o{%myH{N$V5~Tacwn|+sq<5TmRIF9k<^Fww$ttOCQrD{MshBt> zE32fsxwPskb<-WZ=a_m<-Fmm?>MC{n-vM=%y3=!U*Q1)cTfNVo-lR6Djp`mDeJG@l zg!FNf>TK;PxR5S7{DXCc-43cp?df~fL+WAme)WiuJ{8g>A$=yK%X`%ah*yuP$A$EH zv;3<<`qFVQSZ7W+t)8)`y3|L7^o2`(Oh{MqeSL-x8a%1cT2nHo|C|X`B_;QCoK>H4 zNa4B&TL<-7_4#|_&I{?Qd#IPxm+k4!f5?ALea~Z{*VQ-FH`TY)x7BylcZKwgkiHet zcS8DJNIwYanvi}xpk5$e{XqSYc;eNILi(v$?R6pj;c2we34M!c!v_s^d=aAi&Zd5) ze&f;bwUB;xsox6em;XQQ*VLcv=`Qt0A^qx7uM6q7sWZ!~OR7o>r~l&ye^YOIKsSVR z!=?T%q??nnDr&0!iTG3f%Y(QrB-`&U4I$l{GOKWAdDrQcmE-P~po1o9t^Zc0wGoo% z=->Q~s%h;tufKI@-a`8GUdT_=JQH|BLsK+WNH>IZOGvkc^w(aE=I_Afo?D*ZdL53} zLv_KnPFlD<-L0X8-Rbr;J-UQ+YtqbFg=Hmk?q=wqb=G3<=`soZy|andRqOUYv!*3! zDfaXxt-IDkOBQ-b==DP1x`}u#RqNF}6>WsRUB0iw5~f=e(pT&Mw;i+rLf`goPE8xE z4e^*JYWxUo)ZbgPHd^RA{CB(2#@W-FwDDS|mL>FFLhmc|^4;Mbv`L!hx&M#-SXvehU+C$n2?P2Ytc1q~G2z{*3n}j}2=;MXHtI&5lpqR0O_JQ`HdOLe~aY=dg z%+i@9RYIREq~Du2P3@xgiNhG9>(tV%edsSa11PFA(A zz1P?g`re}Dod<^Yo%X%s$rzonmG%P_o|fjg7Ne6|X+LT|Ic~=2;#z3e9nqb2DXp|$ zwOfvY&N}}=+8^3~h}UileTLBYX`aHqp5y+{ZC;%D_W$t#QBq=DXu{&W;+bDm7Lg+`?rwRQSM^+bIX8RDM=hFaVC!rtdIMziM*FMS^ zSZOrrkSMq7be4)=4`W%P1Nf$h{*jV;2 zzzpL|q0bZgX?J%0K_x{sWo0E*cQlsYIk>%JoMkMht+B#bNwKlYILA?C(v1o&@c73f z^fTTw))?m+=NaeI)>vy?pc`YHqZ?~nS-=25Z1uO0OO~zHm)y6eKKV9gHg}&tP6>!(~cNWC|THLs`+2W=DcqloJ#Oc}$ z|EJskM=iy=F`ggeB4dLc&kdP{e&$^r_j%k{Df9)+tIX&&HaY%`)5W#iONFNq+xAZX ze&Yd0e!Q-8%Y#&S8qtnp@j9jD{l+7X^YOYETM&4Kfmc`XN(QeC@EQhQBfx7cc#Q|I zEby8FUfJN42VT|SwHv$+g4bd2ItO0QfY)>2bsoH41h1FD>s9c29lYKIueZVLUGTa9 zUO#}>kKlD3Y+k>B*A4Kx1zvxGH{jh8y!GJS7Q8!vcLaF%1Mh6`E(7l+;C&ptp8@aB z!TUD&_=8V3@JRrl4Djg-KEuFg1o(^spE2Mw4t%D9PY(D@1D`@{_!NWB9`HF2KA(fH z7x)H(Z%6Qr2j6bsn+U$$!8aLvdxCE-@J$2X4DcNTzT?0*6MQFt?H*`Oi(PK%mSqX zl-Z!TKzRj}KR}HIH5=5qpe_S-1*oe*T?^_4P&a|v1nLn`9{}|Ps3$>v1RLtJpgs@k z%b>mr>g%As1L}LAz7OhEP=5#Y7HBO&>i}8+Xd$4PK#K=03A7%drGSz>0mU2(F(?LFjj(b9vExExDbqs!MFm9>%n*&jE{oxT`+zE#!FzdeGbMeV7v~- zU%+1n{w={@0{_hpj#019|*b)9f6K*p<{a+bo7FbKG4w*I`)T-R_M4JI=%=U 
zKZlM#LdWaS@mJ`013KP;2;PNh2U@qj)dSu2<{HSsSw;7g3}?mKLihi z;K2}_1;M2dTmiwhMG(9Mf|o(?3J6{W!RsM-BLr`T;9U^xhTy#rd;o$ELGT#}ejI|I zgy5$j_-P1HAtVGsOb`+eAxRL@142?DBn?6`AfzvZ426)f5Hb@&=7KF`K7`al$N~si z1tDu7WE+I+fRLRKQV$^w5V8kC4nfHM5ONek9)ytN5OM-SK8H@;&?y5tS)o${bUF>4 zo`X*3q0>vy=@sboI&^vyI$eQIKR~A+vC-*Q=yU@*{SKje2yFwQUJ&X7p?(mmL8w23 z2100O2u*{~K@d6~LbD)r3WR1uXfA{nLg;h|ErHN-2we-Ibr8B8LU%%FJ%rv5p+_O~ zK?wD%|IiZ<`Uixyf-pY_lOZe+!a71&2!w?~SPX=9fiM$<#Y0#R2%8FFH4wHC!j?nW zN(fs6Ve24lCxq2Q*Z~MT1Y!3>*ii_35W1sAbcT&J0biKgx?S0k3;yA5dIW|KMmo}LiqC#{sM%*1mUkh_`48(0m6TV z@c%&gZHS11h^`Qk01>0X7BLATra;6Th*$&>OCaJPL>z~R6A7Kp6FM&x#g zbU@@Th-`q!S0M5RL`6f?M2K1eQ7(vj0HTgT)Cq_>2~npZ>I_7^22mFv>H~=S1fniM z)Mbde22npj)Nc@V6QXWG)L#&-gXoqJ?FZ43Hi(Xg=-v>W4$=J~dLTp(hUgIxJqn`7 zK=fpYo&(W~AbJTz3y59@(c2(;2So3L=z555faq@_`WJ}NK}<`CX$LVKAjTVFd?CgN zF#!-01TnT?h>3!j5fGCJG35|54`ON|W)Z|JftYm=vjJl2A*KOh_CSmiV%!k37h?89 z%n67&2{DgA%%c$V7{ojQF=rv>3+N1Vj)Bgjq4ON*TnC*Spljzn(D^>-d=NSxhS)@i z?GCZY5Ze=CdqHd(#AZNjUq@N0?lIdQh;>2iUWh#av7TW^A@&%=o`Bd>5c?>^J`S;G zA@*sAeGXz@fY_HI_BDuo6Jp)mImBLt*sme>JBYmovDYE?SBSj{ zv44Oq_BNPwU}^=X)?jJ}CND7gf=LFG2BrWobp%r+1Hm)|OvAx63QS|clnJJZV44D^9Bi1TfvFHo#b7D}lNn62z*GsQIbfO# zrdlv90+S6)OTn}POsm1P4on-tv;|Dtz~lf^J(wE7uAkF}BD#ZCioadb-E(GGjATAQ(VjwOS;^HAL z0phwtTnfbXg1B^u>kDxMAZ{eYjfS`ih?@^_3m|SQ#O;Q-Mu>X^;+}%Iry=eui2DiR zeuj8Ih!2GLju7uTv&Q#__<<0g5Aib~-VE_uA$~W+H$wcA5Pu%xUxfHCA^t~*zYbl4 zp=-3q^w2dEy5>OFJm~6#u7{xO{m}ISbiD*!FGDvYbPI)U;m~a`bQ=TR#$lt|eCQ^i z+cM~O0JWTPeH=Bknjs6{051EkQf1pQII$c635#hF$)qGLE;KXTm^}HA@L|AJ_w1gK;pZQ zcmWb`LXr-WT0&AFBt<|{6eRVBq!Exb3X-ZJX)z?(An5=k9fPEYAn7$ox&TQZK+^Bf zy#;h{1>J2X=-vamr$G1d&^;Tv=R)@d(0v(nUjf~lp!*T%{s44;6}rC%-QS1qzd`rE zpob26ghLM#^oWNZL!rkw=#dFM7C?_>&|?MkH~>A4L63)^$6L_jBe3*%y)x zklY!P6Ct@fBo{%l1(Ih$@+L^$3CZ=4{5T{(2g&Cl`4S|54awg^iUKLYkkScKdO}J+ zNErYrQz4}YQi>sE38buolr@lY92+TTAmwpLc^6VXfs{+oQwKfULC+4*GXZ+`f}Uy6 zGZ%W6K+iJhDWK#KQI4(oa+mPBCQrkjmETnda)MQA_gVa(; zoneF2b&$FZQg=Y=DM&pFsplZ|8l>KY)LYOi1bW3luP)GQIP}VdUK60#Z0NNBdM$!p 
zN1@j#=ye);eFD9{gkE1m??C7s0llN3_ekhH0eVk@-nG!%wiJ3Vhu%k__bKRo8hT%b z-rqv+?;))Xr1?Rb3~9X~Z6Kr#hO}}>n+IvNkhT}njzZdlkoG#Hy$@+0LV7Dm_kwgE zNbd>h{UCh+q?bW@C8Sq@Eqy1XyCA&@(w~F$*C723$nb#-f5-@gjH!@O1R2GUu@N#H zkg*Fg9)pZ$A>(<-_zE(9f{dS`k2mx&LZ1NW(;fPxL!Um-rv&4)Z|HA?{sGXx%m)1{p??+he-iqihyE|Z03!?t zg#qC(U=|FR3j^lEfCpf}BQW4m7;qB?>R@0?7&sUPj)8&WVBk6!xD5vGfPo*vz|Ude z6&Mr`gHm8nDh!$ngKVB30fWxLpqF6KD=@e%3^u@E1qNrr;2ap72ZQf}!3SaRVHkWF z27e2KzlR}FFr+ICNq`|UVMrAWsfHmB!;mLn$XOWjI}B|BLtDYn5im3hhE9Z`JHR&d zJ{anPp&!H0D=_p+7?upf`oOS$Fl;3Z+XTb5z_1H2>=F#S48s#(crO^92E*sUa61fN z3d0|R;m^YG=VAEoFro#FXayts!iZroVg!uX03&w5h@IFN@dk|e07iTSBST!$j6MXTAA->j!|4CO7|8}>TEmzO7&8RM41+PdV2m5a?1eGk!kAxR%x^HZAB-Ii zV@JZ+Z7{YG#@+{GKZmj3!Pp;QTn3CA0^^3kxHT|tD~#I)<1WCsOEB&-j8B5`X)rzm z#@klF_>C}rGmL*1#(x6iFF|GsWcG#3{*bu}GB-o!R>*t{GCzXMi;!i2tRTn=hO83E zDu=8}$T|mEFG1ETFrgJp@PY|GFkv!GD1Zq?Fkuf&H~_W@hhV}FFyRJF_#Gw=hl!am zaRN*{4inG7#K&Qh3X?)$QYcKC4wI}fsT?Lf0+XJCNl(LM2_}2PWM7z^1(S1O@-&#- z2$T22G6a;=bC0lBjww;FO6 zLhcgC6_C3eY`NXy@4|&@m&jESIA@2<2Jq~&AL*8eQ_c=^!2h(Jjroyx&n3e|9GGN*im{thW zro%KBOgjwIj=;3R6#WZ zL!fXt6pn+!EGV1=g;Sw$2^21eLMId+g2MZu@Bt`14uwxb;ZsofDippBg>ORP+feu} z6kdSB51{ZW6n>43!tbE)2Ppgz3a>-qFHra=6#fN83KSWkC=iN*p(qrJBA_T5in>5i z929kfq9iCvgQ5&5ng~VHpr`I`3XvX z0b6McC{>{}1WHX%8V{vOP?`*-sZg2$rG24v0F;h|(lRKmgwiS~t%1_{P`U<6*F)(> zDBS|3+o1Fcl>Pvve?aM$F#EbP__}uc0<`7D04yCUMM>NWrv~cD3qOnvd5w9ER>ytvS*;|c_@1U%D#db ztzd=;W{ig!^I%4U4QA|v83$m-L6~tIW}EFhux`HJUEIq)|6D+;Kk^z=}U>S%F%Mh>(2g@k1 zj0H<3SSEsH3RrT$G7T(+U?~Pm8Cc9gfaPtlya$#K!16IzJ_XBVuv`JlS77-TEI)wdC$RhimK$KX1(rX-3RrC|!Kw#q zTd;Njs}EQWU{%5D57r>ChJZB;tdU@i0c$K+hRNjEf-(hwz%#McHoniJ!m^}ezPlDMtn7tZi zuZ7tMVD>SX{g9{sb(sA=%>EE&{{dC4ph^!_!B7huu)S9HC0fv4Qd*p=02!74K?SW<{7B@4r+dd znj0|J2j=?2+(4L{2y=VG+;o^b3FhX*+(MYU2Ig*sx!YjwQJ8xQ=AMSRAHdwpF!u|X z*9PYK!93Xp^LoI%44Bs!=1qlpMKG@z=GkH18kn~Z<~<1WPQ$!2Fz-E>_bJT#4Cc3h z`R!r87tBwF`F&u1KbSuq=38NYIn3V(^BpjM7tB8o^WT8^Z^8WEV7~1ysMSGj7}Ul> zZ5-4NhuTc2odC5fpmrnFZid=ZP)5$b|(f zV8KRMuo)Jdfd$XNg6CktC0Ot^Ecg~|3k|R^2o?sz!XdD5EG!%k3m3t{6|is>EIa@U 
zkHNx+VByEG@Cq#a5*GQuB7ayE2#fl{qG7OT1T3nAMYXVKAuMWyMf+jVL0IG&^CK*} z4vQ0Eac^uaPKU*%u($#i&xXZyu($yh?}5cn!QvNT@yoFIDlEPRi+_S8?O}-mOEg%L z0ZWF!l3}o9CM>CfCDpK`0ha88B?n;1OR(f^Sn@6`xd}EMcy?Z!=WkZFc(8Q?TbAc_ z2yD~9<_6pSU^@!7n_$<0y(QQOfqgXC$AWz~*!O~cKiIE={TkSR0?`-5Fc2d^>;Q2e z2p5QJAZ~)V1xp9R(lM~qHV&4qgr%Ec=@wY}1}yymmVN}wf?-)SEb9!*ro%ETEGvg) z55cm>VA&I}T!Q7^u-q4xkAdZrVfj>8z6+MSVfkKI{uwO)29|#ZD`H_qcUX}OE9QZ1 zg&kHbg%xLD#WS$tIau*0tklEGHn6fktQ-L=N5RVVuyQ-Bbim3tVdaOg@?%&P4y#PC zDjrsq!>W0(suor~46B}iRcB$f7p&G`wLh$$2CK_p^-OH6eh^llhSg_aO&eI_2WwB%rViFLz?wa<=1o}hA*}fr)*4}LD69>KwI#5&9M)FC+Jmt6IIKMZYkz~a zf5AE(tm_Z!M!>pJux>r9+irt(4p{dpta}gEy$|cdV0|pCkAwBquzoSDx54_4Vf__Y z|0Qhb3LARDhF-8iz=pN3VLfbk2R2-U4WGhB6*h*z#!%Qe8#XS0jf-I8GqCX$*!UW3 zviZZNFxV6Uo2;;@1~$!uO^?B*XJOOxuvvx8A+R|VHW$KXGi)J*!DbZdmXmD3ERV9 zdn{~^gY9cz`&QV#4Yq%ejqSg|_M5OH4R#EI9YbKpCfKnPcGSa;i?HJ=?Dz^CCUEos zM+!JIdH(O} z2D?&WS8v$09CmGhU7KLnN3iP)*mV`^W1v0(>XV?p9O~ymeJ#}2L;V4$KLquMq5giT zKMnO~p#C|ie*x-WhWgi_{!OTV2kJkC`pZy%1?p{ILH)N-{{!p}f!)Jl_k7rW0Cr!1 z-9JME(9i-JBxq;@4eg=98ydo)!2}KQ(2xWT$AsZU< zpuqwSOThE;Q`)c^8n#1&0~#8j;XY_^L&E`RI1CL(q2UxXyaf#(LBmC8xC9NKL&Go7 za042Chlc+^!)<764UGykYS0)AjiJyO;rM8>F32_r8fQY|3TWH}jXR;S9vT~=(Fu)5 zq47azd>k6jLgUlW_#8C80F5t0<7?3P5j1`Rjh{i|=g@cs8oz|buVIf5>@mTfOxQCY z_Uwc`Pr#n%Vb2R-bB+e*IB;fxa}qeGf-@JK`QR)9X9+lGfYSoba&XQDXEiwIfpY;k z7lYFd&Sl_S3C=a(To2Ao;M@w%?cm%A&fVbL15Ot>_k!~PI1gjPc@&(-z+bR)A|YxYmJdBe=GJYa6&6;Hn2#BeSu7|*N z5?rUj^%%IG1lKumJqxb$U~|0$u2;eJ2Dsh^*L&dl09+q~>r-%D2G!Sw^U zegfAo;JN{>Tj2T=+!J09E# z;O-9Y6ma(fcRINHf_nhC2ZMVUxJQC}47kUGdjhy8gF74CdEhPp_jGWVf_o;ot>CTz zcNMs6z&#(_3&Fhv+ydOo!M(}`?zQ0F0PfA;-UaU6;C=$!&x88~a9;xV*WmsZnj~oQ zh9+NVN`a=n(9|EA@}X%4G?}4kB{Xem<1MF2{ugwU1 zlVNWk*xL{Gnqlu8*jod8Ps84Gu=g3*dkyy9guS<5UpLs73j2D)zC74h3j1cjzRj?2 z7wp>&`_96?7hvB@u;??a{% zNEl%RLI!~(M6GpKYu&S2NA0k#TCLVS>()=Lt6HnpS@)=wy*Gj&Q-C10&(a1w(v7~Gk` zCouR72A|2`KQj0Z2H(Ztw;B8~gFj{PbOz63@B#*JXYeit*E1xGA-x&WN5YU37;*+f z&Sc274EYm7Ze_@;40(qk?=fU1L%w6k_YA3G$dOGN8QP1X1q|)a&|fh0B8FbV(Ayb$ 
zKSLj6=o<`upP?TzbQ(kFFmxV6w=uLX!O-0d%VbzDhUGBqB!->Iu(KF;JHzg0*n{Eu#WY~8M`<`LV3_HZI!wk=6ct3{cGyDpM|DNH0VE9uEe}UmIF?=Y)$1!{Y z!xM`czLMdq84-*)nh_a{C}PC1jQ9y7E@Z?NjJS#scQN8&Mm)lZPZ%+n5knaEOwwVqLxjH+gIM@DyNbPqB^YX7;_F|&ST7NjJc07 z4>0Bf#{8Et|6|Nj#;j({TE?_6rk$}ETg=$w8G8a_uVd`3jJ++v*nczj1IB*D*y)U& z$JhmoZDj0z#vWu`5#x?!+)o&H4dZTL+)a#ofpM=f?hVF`W8Bw_o58q)j89>F8sjfy z{1uG9it$e}{&~i~$oL_QPmE>!c*d__{6@xaX8ZvrBr_qE3HeMYWkMMfu4ck@Ot^sw z&oJR7CcMmqu}ql8gy~G!#Dp3q>|kOiCiY-rFDCwsiRUu$d?x;di4QRGArccOGjSFZ zXEUi2lX@_z7n81K(w~@gE0e}BX)2SxX3_yBCo?&f$=5LX1}5LcsR^u9lm~#ub1-mYQA2} z^sWh}_hEW2(|^nKo0xtx(?4hWFs6@SdKJ^_ncm2ZQ<-r#GtOnkbIf>^8Lu&8Au}qN zv7B!T`KF9-j^&$&_~t3Td4_N1^37ttS<1{@W)5U#qJ){ZGV>m0-p9-#%pA+i@yx7Z zW<4_-nRN`aPG;7r%zA=Z&oS!-X3b#Md}e*ew;lPmJKy%;+u!o-O?-PZ-+s=w!}xXt z-)`mGoqW5C*{6`0eHOFNVfM?+ev{d6GrN)5`Iqx#(Q|5fmoLc5I zF{hcigP40Fb5CaOi_CqUx&LPFYUXZX?l$Hf%e+&VcN+7aW!@{ydsV``Wz1W{ymibU z#QYPPe=_qQVSa-7&oX~5^A|IJDGTygP{M*iEO?LwPqN@?7R+YB4=h;Bcm4V97`{7> z?;hs6r}-|ycQg6!JHGp#?+&vtJ;B1GS$H)IuVdj2EF8tc$t;}8!bTSEXW>Dtr&t~x@EdC{nUt#fEEPjW@%UQgE#hX~thb2WU8OV~`SaKgr z9$?8xmP}&F6qdBH6iYj>^n8|H&eAJb`aVmKJm@U_lBEqS-N(`cEc-djE@0V3EPIY+ zud?hlmW^iF6qZe6*$kG=V%b8L{lKznmNm1im1S)#+e>*0M%yNcj}Xzoz^f%D<(2F69d-|DN*2lrN+FN6J@HzUIim>nYzv`Bus+DX*b?Clwb{ z@k=UxMa5NATtmh0sQ3dFH&F3MDsG|THY)C*;%+MLqvAm-{z}DTR6I$=GgLfF#q(6W zM8zvq{ELb=s7Sm;#k*9zPsK-6d`iU^RQ!*MAyf>fViXl)shB{;WGbdnF@uU(RLr4b zJ{1e8SVYBAD%Ml6k%}!;Y@?!%irxHJ%8#e;<7xbO20xxj;>Qd5@nU{_jUPYc$B+5( zGk*MnABXefNS0%HCzfZjyerGQvpkpOc`W}O%Wq}*Z7jco<#)0CVU|C_@^LJm$?|Vm zK9}Y5S-zCz<*Yc06-R2FD|)e_4=Zw6F_0A{thkXCcd_CgR@~2u2U+nXE1qV>WLC^& z#e7yQWW^7xSk8)-tju9$F)K@1c?>I$W91{Pe2$ebu<{?Qe1(-2tlY%PEeTd`XJr+u zj$_pstU8lbXS3>DR=vWi_gM8lt3G1YC#*UWyN6XRtlG<}{j5%6bqcF5W%ch^eJ!i6 zXZ4M&zMa)~uzC!uXRvxEt7o%%E~}5^f2)_WCX+RNS<{a-1+3}MnqydV9Bcl}ng?0) zFl!!V&Eu^3J8Pb2&G)QX#hNv&S#k(o)vUXYbvLl? 
z3)YQh-B{L5VBI9v&1BuTtlP`_6xOG){%F=`us&PD`kt)6j`e?G{hh48hxPZd{xQ}+ z!TK?*pTYW>te?&LxvXEz`lW10VMA9obZ0|PHuPpgAsdR=a4j2dW5ZwAa2FfyVZ$SA zc#I9x*|3leKP1?&gbmBsu!arm*w~egd2B3TV-Xt%vhgQu{3#pnWaFc3e4LF>vGEx; zzQo3t*|?334Qy;;V+$MG*n~|T*mOFZE@IOqY`Tn1zhcwx*p#@IO`o%A6r0AdX*`=I zvgsQ(&0^DjHm9-qC^l!Xxig!4vbi^#Z)fwvY<`5zkF)tnHb2ki7umd;&6RAfX7dg< z*Ri>U&24Nsl`ZG9VkAe#@3u+44SHK4i-$Z262WL)kK%EeF_|&eo&Z z+KH{1Z0*I?9JXH1);rjG7hCUT>-}tfoUKo?bv#>Vv2`|E=dpDGTbHr5Lc+FewiU9i zh;7AeD`ndWY&(fsz+keXT)7gFz+b?1J)olMwg6+R&`*m!;k?ps${Z6*u!}j~x{t(+=WBWw5 z&tdycwl}lAoyrs{yHeSm%3Lb*sq9bXKq^bAJb}uSsXUF!GpPIpl^0QY4V5=hc{i1b zd#QYs$|tCNn##XX`5cw6Qu#WSZ&LXVmH(mgb1FwsIfcr3R4$-$8I?a$xsu8?RIaD8 zlFAw?cT%~V%0{X(s5+6VbEvw5st2ihl0?;WRJ}pfn^b*B)hASaPSuxG4W?=gRpY6e zMAcNPrc*VKswGsdqiO?HRaDhdRYz4lRZUbKpsJnfpgNiAG^(?x?oD-ns*k7o1PRq= zQ2h(4&!PH!sxPAYN~(WN^>3-Zmg?)N{xj7NQvDRwFH`+bs^6yiJ*q#T`eUj;qk1US zBd8ut^*E|0QvD6p3#eX3^#-apQC&mz&IHxFscxjYnd(E-peBi$RBDc*rYkjls41al z5H%-Ka|$&-qvq$-oJGyW)clf~Ur}=vHP=vc12wl%^AI(ErREuGo~7n_YF?t|6>5&$ z|26MY^FB2nQS&J^L#P=`%{SD{qGlmAi>O&jO$9Y8sM$!(7HYOrQ%%hdYE!5^hT7Aq z{S&o!QTs5p&r*@nA%~~j-+-HwKJ(*MC}r4S5do` z+6~lhrgj^(b=1~V+eB>(wR_pojUA`6<6?H)&yL60@gh6kV8_So_>>((*fE?Pqu6mo z>#$=2J7%zB7CYv!V?H|;vST|tJF>GkJI`U~rR@AIJO9Yed)Rp&J0D}`lk9wkozJrK zd3OGboo}%7Eq1=k&iC0lft~BvSxa3P>T;+nqVBi^b!Sj_CUqB5cL{ZuQFjG(S5tQ# zbvIIXGj+F8cRO|WQ}+yYFH`p}b^oF6bLzgNZZLJjs2fS$Bu2kMqn zw}HAH)FtYuYoTs0bqA|Vz1wd}5DcP+b{*xkbJz3e`~?sn?a zs6U$ePSj^n-%Ub&U+M=?e;oB^P=6-%7gB!-^_Nk91@%``e;xHVQhzh`w^DyQ_4iZ% zIQ7p_{|5DMQvV_KpHTlf^#2YlcNyCRUd_u$LG<-?JU>e5IFrJ1P%A?n!3}JOH)2g{b?FVQz=d7 z({vY2kJI!yO-JfKO_OMvNz)>lme90{rnNL}plLHr+i0qzsh*}Lnp$Yu%bs9QC-(GY zPZ4_tvgar4Ie|SVv*$GSoWY*+*>e$lE@jWYN?(R?M%SJQkQ%{S6~GtIZs zd^^qe)BF(4kI?)$%}>#MMC;J}Z<;@(c?iwJXdX}VB$}tvJe}s5G=E3)4>T{Kxt!+Z zG;g4}n&w8D+i5||QM6>xl1WQfTC!>BM@s=MMYI&tGKiKFX*q)bS}vmH8d`o!%T2WW ziIzXp@)ug}qUB*)9;M|8TArrmZ?wEj%e%CEPRl4-#?UgAmg%(2q-8cO^JrN@OF1pe zX<0?fT3WWzQb$V*tsNw^Cezx9)+}1P(b|L7-n16dI)K&^TFYoXj@DCYJ)71`X#Fj% 
zzo+$PT5qNGc3SVG^&VOuq4jZEpQ1HE>)&bpC#~<$`YEj=XdOlCLF;N-x6rzq*1fc)(3VD97H!>V>p@#@+WOKqfVL9a%4j=|wx7~=I&Bxy zb`@gus4Oh>Fn*u-tI?;JU!W)!`@u>=Ck)$ z_Wp#uC$RS<_MXY!v)KCsdsnk}EqgbxcN2SS*t>&$_p8rU z*|&;)YuLA*eH+kj0#{L=XU&8)s z_U~Z-F7`LDzlj6SaNs2lyupFd9GJv`6&%>ffxR5q&w)c6#K9yErgE^5gGa>E2ao08 z@f;ch4&BM2M>+I3ho0h4;z;{DhhE^&=NuZwp%EM!&7pA|n!};_99qbsMI2f} z`yXh(h4%Yte~|W=cnN z;#d(s5pjZulSJGh;t>&#iFi`P(;~)+m?`305pzW>5b?c;#Ug4&G$lkdi)a(EUv$V6 z9STKAJM2DY<4kw5XCyNfJi4GTt4%dnfcZ&}9i4G5l4o`>NIp>{pCpn`70IWIK7k@BQSc}AqXB~soIDesDu_eIJ_BIQ$&GD4(`7Aa#z$~Piq zmPnZ+Qs#@4g(78O+W|7h&Qud0J10tneq;?dkokePvNX-?g zc_Ot?qz(|NB_g#UkpdLXmonNWD>{-X&7+5vdP~)W3?<$3*JW zBK2<~^*NFHo=BvQ7OBfc>N=5HBT{#W)JBooEK=J<>VA z0g?8ENP9}8y&%&5A=3US(q0p3{}ySVh_uf{+F+43Or(tzX=6m%WRW&aq|K0sv{@pp zR;2e7>F0^`%S8IGMEW%%{brGVi%7pyq~9ab?-%J0iS#E#`qLu)ZzBCUk^ZVke_f=% zFVepf=|e>NXpufnq)!y-Q$+eqkv?0b&lBn2iS%-jzB(b&w}|xZqT~6Z<5i;LuSLf{ zijKF4j<<=9cZiO6i;nk+jt`2C3DNQIqT}QzG#uXytYLRiB$oP}UxLsrHjHx1Hy2zL*GM0#pa*?rIWULYyYemKek+E51Y!ev`BBM!Uw1|v0(TV8P zL3H|==yah-bh=n{`laY}x#;wk==7QB^o8j3KhbHh=(J6AY7m{8M5h+fsZDe~RdhaI zbiPn@zC?8XrRe;I==`zh{Hf^th3NdH=)6O8J`%rAbZ!@!Au^Li=20RuLu6)3L}s?g z>?<+{h|J?e<{2XMOp$r6$h<&gUMw;%7nxUz%wLPlYenWCMdlqM^FfiB5Sh=4%oj!G z%Odktk@;_t`L@V>Ph@^9GQSd;qebR)k@=0toFg*li_C>b7B3N*7= zk=Y_L_lwL!B8$jM5n1UXtE0&3BC>KsR)3LoyvRC1WSuIqP8V5cimY=*)&(N#Vv%)~ z$ohlGx?N=5A+qijSr3S;hecN6agp`Zk%7qihsgT3$ofEJeI&BRimVADYqH3iCbDLT ztXU#!j>wuXvKET0A4Jv?k+n`_Z4g)F8>l;UKd^dBf5Mbx>SfRn?#o_qRV#CrAlO0 z(e)zH^-|IGa?$ll(e>A&>u*KZYem=VMc11|*ZW1+heX#$MAyef*QZ3+XGGU$C8Fzd zqU(#I>*u2D7}0gP=-ME<<%w?Rh;BECZl8#5lSQ{#qT6iIZJy}%o#?hgbXz65)rf98 zMYr9eTchaKEV{LcZu>>IL!vv;y^HAHO?1x|-Fu1deMI+u3DLbkbnh>^4;0FBjQYitNO-BKtOx{TGpakI24XWIrUb9~0S6itJ}Z_DdrB zZIS)1$Q~@Rhl%WwB72O;9xt*diR`H&d%DP;DYBP|>>ow;N|C)rWUm+5n?&|jkzFaW zYebK9(W6WxdYmPC+$4HDBYON-^q4PtY!^KmMUOqAX9v+UMf6M;Jv)k?okhmq61%H=@_?MX&2buNy_Ln? 
zuV+QC=S8oVM6XvwuYZYN<3+DkqSryuyPxQNvFQC5(ff7L`#sV7vxMk9QuH1pdQTO- z=ZfA7MepUJ_e#-wljyxw^xi3Y9~L>OA}2@W3=laNi=5wxoZpF@J4MbtBIkaQ^N`4S zMC3d!a-I@La*Uk6i<}ok&VNMCha%?_k@NWx-cI+;c?kxgz&kk$aoS{fo%GOXS`w zavu=64~yJqMecJV_j!@~g2;VSFmu(> zk@u;{`&{G=6L}*=-WZWLUL^7+iM*L2Z??#rC-S}%c|VA}`l68T4q{GKAex5)1+^7BM~p~xQ~@=p-?CyD%%MgA!w|2&a@fylp1Gg0t`C>SgXhKYiaqF|gTm?#RSh=Tc|V5ulrFA6q_f^DLp zN)*(Jg59E^Q4}V94`t_6osdV!k>x4pNqn?MB%xj@B&eIu_(M& z6kab1ZxV$!i^5w(;ccSu4pDfwD7;S;{#_J)C<=#)!U>}Adr`Pc^bgTLL-g-0`u7q2 z`-%PqqJNR-Un~;+uMqvO7yWM({cjfiZx#J-7ya)P{qGa~pA!9_5&d5f{r@5Q|5NmT zP4s_9^#70O|AFZLvFN{0^xq=-?-NBGL{YLRN)ts#i=v*QsJAF85k+O9=r~dIQ&Du1 zL=>GWicS|rXNscBM9~$Z=xR~)8&ULoQFNUsx=|F}EQ;D81RG`@U$3^5Cg`F0n@~Q=|?7W#DMu?z(O%# zkr=Q-4A?9NY!d^j#DID+u(KH0O$&{Dl~Jju?2U7Nl2Qlz=G4KvC z@NO~iJ~8kyG4M4p@GUWLs2Dg-4BQ|FZWYCyL~$ZV6!#Uy14MC&C@vGl$BE*fisI8n z@tLCdY*BokD85h>Um=Qb5XFBH#gB;MPet(%Q9Mi(j}*mYMDYSqyj&Eo6vbOa@pe&M zEsA%D;$5P+K@=Yl#qAfGYQKB?Ml;()i{-X4FQF?+XJyn#RE=tc7rRR#$ z3q-GJl{|7UxS{6CAdBV8Qx G!v6yV3c~gP diff --git a/main.cpp b/main.cpp index c249238ccd..acb6e3ea94 100644 --- a/main.cpp +++ b/main.cpp @@ -33,7 +33,6 @@ #include #include #include -#include "tga.h" // Texture loader library #include "glm/glm.hpp" #include @@ -60,7 +59,7 @@ int serial_on = 0; // Is serial connection on/off? 
System wil int audio_on = 0; // Whether to turn on the audio support int simulate_on = 1; -// Network Socket Stuff +// Network Socket Stuff // For testing, add milliseconds of delay for received UDP packets int UDP_socket; int delay = 0; @@ -79,16 +78,9 @@ int target_display = 0; int head_mirror = 0; // Whether to mirror the head when viewing it -unsigned char last_key = 0; - -double ping = 0; - int WIDTH = 1200; int HEIGHT = 800; -#define BOTTOM_MARGIN 0 -#define RIGHT_MARGIN 0 - #define HAND_RADIUS 0.25 // Radius of in-world 'hand' of you Head myHead; // The rendered head of oneself or others Hand myHand(HAND_RADIUS, @@ -108,15 +100,6 @@ Cloud cloud(300000, // Particles false // Wrap ); -// FIELD INFORMATION -// If the simulation 'world' is a box with 10M boundaries, the offset to a field cell is given by: -// element = [x/10 + (y/10)*10 + (z*/10)*100] -// -// The vec(x,y,z) corner of a field cell at element i is: -// -// z = (int)( i / 100) -// y = (int)(i % 100 / 10) -// x = (int)(i % 10) #define RENDER_FRAME_MSECS 10 #define SLEEP 0 @@ -157,7 +140,6 @@ int head_lean_x, head_lean_y; int mouse_x, mouse_y; // Where is the mouse int mouse_pressed = 0; // true if mouse has been pressed (clear when finished) -int accel_x, accel_y; int speed; @@ -257,7 +239,7 @@ void initDisplay(void) void init(void) { - int i, j; + int i; if (audio_on) { Audio::init(); @@ -603,10 +585,7 @@ void display(void) void key(unsigned char k, int x, int y) { // Process keypresses - - last_key = k; - - if (k == 'q') ::terminate(); + if (k == 'q') ::terminate(); if (k == '/') stats_on = !stats_on; // toggle stats if (k == 'n') { @@ -798,7 +777,7 @@ int main(int argc, char** argv) glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH); - glutInitWindowSize(RIGHT_MARGIN + WIDTH, BOTTOM_MARGIN + HEIGHT); + glutInitWindowSize(WIDTH, HEIGHT); glutCreateWindow("Interface"); printf( "Created Display Window.\n" ); diff --git a/tga.h b/tga.h deleted file mode 100644 index 
c45f1730c3..0000000000 --- a/tga.h +++ /dev/null @@ -1,470 +0,0 @@ -#include -#include -#include - -#define IMG_OK 0x1 -#define IMG_ERR_NO_FILE 0x2 -#define IMG_ERR_MEM_FAIL 0x4 -#define IMG_ERR_BAD_FORMAT 0x8 -#define IMG_ERR_UNSUPPORTED 0x40 - -class TGAImg - { - public: - TGAImg(); - ~TGAImg(); - int Load(char* szFilename); - int GetBPP(); - int GetWidth(); - int GetHeight(); - unsigned char* GetImg(); // Return a pointer to image data - unsigned char* GetPalette(); // Return a pointer to VGA palette - - private: - short int iWidth,iHeight,iBPP; - unsigned long lImageSize; - char bEnc; - unsigned char *pImage, *pPalette, *pData; - - // Internal workers - int ReadHeader(); - int LoadRawData(); - int LoadTgaRLEData(); - int LoadTgaPalette(); - void BGRtoRGB(); - void FlipImg(); - }; - - -TGAImg::TGAImg() -{ - pImage=pPalette=pData=NULL; - iWidth=iHeight=iBPP=bEnc=0; - lImageSize=0; -} - - -TGAImg::~TGAImg() -{ - if(pImage) - { - delete [] pImage; - pImage=NULL; - } - - if(pPalette) - { - delete [] pPalette; - pPalette=NULL; - } - - if(pData) - { - delete [] pData; - pData=NULL; - } -} - - -int TGAImg::Load(char* szFilename) -{ - using namespace std; - ifstream fIn; - unsigned long ulSize; - int iRet; - - // Clear out any existing image and palette - if(pImage) - { - delete [] pImage; - pImage=NULL; - } - - if(pPalette) - { - delete [] pPalette; - pPalette=NULL; - } - - // Open the specified file - fIn.open(szFilename,ios::binary); - - if(fIn==NULL) - return IMG_ERR_NO_FILE; - - // Get file size - fIn.seekg(0,ios_base::end); - ulSize=fIn.tellg(); - fIn.seekg(0,ios_base::beg); - - // Allocate some space - // Check and clear pDat, just in case - if(pData) - delete [] pData; - - pData=new unsigned char[ulSize]; - - if(pData==NULL) - { - fIn.close(); - return IMG_ERR_MEM_FAIL; - } - - // Read the file into memory - fIn.read((char*)pData,ulSize); - - fIn.close(); - - // Process the header - iRet=ReadHeader(); - - if(iRet!=IMG_OK) - return iRet; - - switch(bEnc) - { - 
case 1: // Raw Indexed - { - // Check filesize against header values - if((lImageSize+18+pData[0]+768)>ulSize) - return IMG_ERR_BAD_FORMAT; - - // Double check image type field - if(pData[1]!=1) - return IMG_ERR_BAD_FORMAT; - - // Load image data - iRet=LoadRawData(); - if(iRet!=IMG_OK) - return iRet; - - // Load palette - iRet=LoadTgaPalette(); - if(iRet!=IMG_OK) - return iRet; - - break; - } - - case 2: // Raw RGB - { - // Check filesize against header values - if((lImageSize+18+pData[0])>ulSize) - return IMG_ERR_BAD_FORMAT; - - // Double check image type field - if(pData[1]!=0) - return IMG_ERR_BAD_FORMAT; - - // Load image data - iRet=LoadRawData(); - if(iRet!=IMG_OK) - return iRet; - - BGRtoRGB(); // Convert to RGB - break; - } - - case 9: // RLE Indexed - { - // Double check image type field - if(pData[1]!=1) - return IMG_ERR_BAD_FORMAT; - - // Load image data - iRet=LoadTgaRLEData(); - if(iRet!=IMG_OK) - return iRet; - - // Load palette - iRet=LoadTgaPalette(); - if(iRet!=IMG_OK) - return iRet; - - break; - } - - case 10: // RLE RGB - { - // Double check image type field - if(pData[1]!=0) - return IMG_ERR_BAD_FORMAT; - - // Load image data - iRet=LoadTgaRLEData(); - if(iRet!=IMG_OK) - return iRet; - - BGRtoRGB(); // Convert to RGB - break; - } - - default: - return IMG_ERR_UNSUPPORTED; - } - - // Check flip bit - if((pData[17] & 0x20)==0) - FlipImg(); - - // Release file memory - delete [] pData; - pData=NULL; - - return IMG_OK; -} - - -int TGAImg::ReadHeader() // Examine the header and populate our class attributes -{ - short ColMapStart,ColMapLen; - short x1,y1,x2,y2; - - if(pData==NULL) - return IMG_ERR_NO_FILE; - - if(pData[1]>1) // 0 (RGB) and 1 (Indexed) are the only types we know about - return IMG_ERR_UNSUPPORTED; - - bEnc=pData[2]; // Encoding flag 1 = Raw indexed image - // 2 = Raw RGB - // 3 = Raw greyscale - // 9 = RLE indexed - // 10 = RLE RGB - // 11 = RLE greyscale - // 32 & 33 Other compression, indexed - - if(bEnc>11) // We don't want 32 or 
33 - return IMG_ERR_UNSUPPORTED; - - - // Get palette info - memcpy(&ColMapStart,&pData[3],2); - memcpy(&ColMapLen,&pData[5],2); - - // Reject indexed images if not a VGA palette (256 entries with 24 bits per entry) - if(pData[1]==1) // Indexed - { - if(ColMapStart!=0 || ColMapLen!=256 || pData[7]!=24) - return IMG_ERR_UNSUPPORTED; - } - - // Get image window and produce width & height values - memcpy(&x1,&pData[8],2); - memcpy(&y1,&pData[10],2); - memcpy(&x2,&pData[12],2); - memcpy(&y2,&pData[14],2); - - iWidth=(x2-x1); - iHeight=(y2-y1); - - if(iWidth<1 || iHeight<1) - return IMG_ERR_BAD_FORMAT; - - // Bits per Pixel - iBPP=pData[16]; - - // Check flip / interleave byte - if(pData[17]>32) // Interleaved data - return IMG_ERR_UNSUPPORTED; - - // Calculate image size - lImageSize=(iWidth * iHeight * (iBPP/8)); - - return IMG_OK; -} - - -int TGAImg::LoadRawData() // Load uncompressed image data -{ - short iOffset; - - if(pImage) // Clear old data if present - delete [] pImage; - - pImage=new unsigned char[lImageSize]; - - if(pImage==NULL) - return IMG_ERR_MEM_FAIL; - - iOffset=pData[0]+18; // Add header to ident field size - - if(pData[1]==1) // Indexed images - iOffset+=768; // Add palette offset - - memcpy(pImage,&pData[iOffset],lImageSize); - - return IMG_OK; -} - - -int TGAImg::LoadTgaRLEData() // Load RLE compressed image data -{ - short iOffset,iPixelSize; - unsigned char *pCur; - unsigned long Index=0; - unsigned char bLength,bLoop; - - // Calculate offset to image data - iOffset=pData[0]+18; - - // Add palette offset for indexed images - if(pData[1]==1) - iOffset+=768; - - // Get pixel size in bytes - iPixelSize=iBPP/8; - - // Set our pointer to the beginning of the image data - pCur=&pData[iOffset]; - - // Allocate space for the image data - if(pImage!=NULL) - delete [] pImage; - - pImage=new unsigned char[lImageSize]; - - if(pImage==NULL) - return IMG_ERR_MEM_FAIL; - - // Decode - while(Index Date: Tue, 20 Nov 2012 21:51:09 -0800 Subject: [PATCH 013/136] 
Added spaceserver messaging. --- .../UserInterfaceState.xcuserstate | Bin 101683 -> 101676 bytes main.cpp | 14 ++++---- network.cpp | 30 ++++++++++++++++-- network.h | 1 + util.cpp | 5 +++ util.h | 1 + 6 files changed, 42 insertions(+), 9 deletions(-) diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index b653b6168a2bc179a65c76fe4b742f86bf36cb02..b310c1ec8998fae6553ee58651446f823ff07bac 100644 GIT binary patch delta 33413 zcmaI62Y3|a7xw*IGPBZ>%$~ zbL)$ZU3%J3$I$x^bF93@xjqx4nslo85EWt1{n8KaC< z#wp{KLZwJ4R%R(BN~uz&lq>a0gECK{jkq9#Hlu4=N8S4=a0>{mOI7^U49`4dqSci1LRsyH>OJZ{^-=XP z^>Ot@^`QEa`m*|t`mXw(`o4Nx{Zc)lex?4bo>6~Me|5BT1UV#!;!qt9hvw++h;<}5 zk{m8asw2aZG1f83F~c#_QS2yp%yv{bY8-AygQL;0$g$Y5!qMyy zj9`@Mrq@;DcW?cSS!=2wOVbS)~GGlR%&as4cc|uM$M<)u5H$~X}h$0v_0B`+C$nC z+LPK-+SA%I+RNH2+9B;#?JezN?Gx>o_Nn%nNBdm+Li=9(K|7`WsGZgR)c(>g=%Koz ztGYwibX^bA!}V@@gdVM@>)CoAy}v#{AEFP{N9ZHznm0`n~!Cy5}K%pZ>W1w7y?| zL4QgANdH*>L_el~s(+?`u79B)*T2+H>gV+H`UL~SX0$Vc49SoUyV2QDjV?x4BgTj| zvW*-g*XU#PHS&ypMt@^~G0~W0Og5$%Q;liHbYq55Xv{L^8S{+=#zLdfSY#|VmKaNo zCd1<~yv8bHow457Y-};M8m-1QW4p1#xWm|K+-dAG?lJB+9yA^?9ygvdo;MB{FBmTw zhm3cOca8TnxADGl-1yQsVSHtLZTw)IGJZDB7-x<1redn5!_-XOG)&X9%rG<3>~6-J z31+hCGSkfrGt=yA4l>7@o^j@QbGkXhoMo1qwPu}JZ#I~V%w^_s(`&9ZuQ9JRuQP8l zZ#Hi+x0&0`9p4Ai{?S|u=$$#zWIUqp?TCiW}Y^GGJiJD zn7^36n!lO9n}1kAmSlCbLM(@6SYcKdE5eGh;;bH4#w;t-%CfSp94puAXAQCjTNA8_ z)+B4PHN~206tettHk{%WDa1m9^Ho%Gzk%Y~5nrYHhc6Sa(?WS^KP~ ztmmy4t(UD=t;5!v*1Oh+)+a5_;6b*HEp@>+E4f0>6Y?M-mk7C3$m<_6LkeutTS7YX zP-E!rSKEumdOthU@geK>shOVcLW&hqqL7?I>LsL9A!P`uuaNo+X_%1432B;;3WPLE zNaaGR5z+!7Ef&&pA*~eBDj}^E(iK9wT1Ynt=_VoFCZw%G+9jlWh4g@s9v0H0LV8k2 z&kE^)kX{nft3rDHEjmz28Ra3}c<9?B&pPZyuI(rKzxmLfFGFmZyM@$END)G^2`Msm z&?7~j$DLQ~ozaw%9F>)x(>$XoJv}8kDy7-a(veP>p$~^X*0S+rSVZXKp-TTp{&oX+G|4X*}6AC_;$|aK$QdZPq*?^=sL8GN!jvN%37AhmxwK;Zib$G(<>41Nr2)*iI~OS@lhi-u;y!e!l_AKxL3JSV+T# 
zG(t!tg*0l1GE~XOr3@F+Xd#UW_>B$tP5w5q_e5oy-*1vKS(&0t71DSiO%T#VAx+w$ zOjl;$QVN7LSx8d?ep3T}vERi9RVZ`)84Rvb+_;oFAx#(3i~we)zgpW+?1M| zotm2*m70;8+g#9;mXej5^RKEa3zemTS}Dtv<+zj;LMjweQGlno<&E#+gH|c){BgAP z$`!bjD}_`dq|$)BtR?RIq~05q8~ysPS6Y-ClubgKEu;z|RSKzUhvHLi!lm3Iq-r6} z3HZzn=>P5eq~1G}yZnB4C_9xqm0dz|3#nE}bwaA&q1>(9gG;$jNDV@o7x0@O@O$Nl z9zpw*Cj)w(Ql7@8JS(JyLTU_P7PUm5iVu2GIn}ulG;3@DJLi6-08-#Ra!1t<_E~k6+?5@Un^LD6FYP1@o#tP{g zAzdq^>x8s%hZ?UYw9#EJq?VTE-oK1UPQ>bM$_{=gbp@O{xC= zN=?bkY@X4Sk^LWz@#>V8S!cRvQ`Kp>)EPp$T}Yb)6kA$m{p{>LOP%e{tVAtU%hYlq zwF+sQkhTkH#}2hZt;D5P3+WCa?F{(b*tAae#M;x)hhXTu65b>FxmL zo|X+~dIovb%iFZCRoCHCuMpCGLfRbw-0#=E*;TNoe_>NBRuxV9SYLlY&mFKllLy)l=$^xYVD7^s=fHx2dPSX++1urmPJAWMrjh zG|y~G&CL8yjel3q`Bem;S1+`6;$b1Z)^h!CF+p}mCqKQ%(b*A<%MmK1H-z+Nfc}V| zzE%IN+myB!RAxr=jHa~A?0+TbFdSXIc{?1Y!*YZ$!k2cJE0Zagr?Cnyb zh?YG{WXsCGX1A>Pqhna_HqZx`Kkz2=@Z}lcDA^%q27{KN4}7bz0Egt;5)}~#|XzrM|8`FXCuNt z71C$^9E$yWH8-nQ`>T&ogWECAF}`KDx@!3 z9n*z$JP@Ntp+82EkiNVWW0qs2qm%?8eH93C!drYP@5O4BV~(GvT1Y2b9dm{BO@Jr1 z&d*aXq;D_r%yW!%Eb#MuAK>|}CFR^Cb*W>y-+h^oPPICkg!E&ZyT|YD71HTT?yDRl z9c%pVKL^}@YB_&)QcLpr@f|lfeBQi!9alQ8Y7yu4AR+zI;yWK5bg$#4mdDP=_2@3- zSRp40*(Kx@A*TvCUC0@iD!a3-CcpY@Hk;NvXJ~$Ac6xq_D}QKinkyqeE5|i>aB7Y# zBRefWD|<-#;GF#2OTPEC{CPfn)2lXD`v=-s&IVXEt+Ms9?QOaGLRa5To2^&-$J>nO z1ICtF7kb&A+0@k$$-qd5))AaXlDWRUIUO?zxzTkgEj%l6u)v$n3Aa&5iZ zztx6lA0Y7++ibmT@B6N@*}AqE7kah-xXs$REyRx(dRT!|o8xoGasU2)A>@!&$CpA5 z4eW2Xlirf8m-_8H#}9tc_d-@%9jAQt?QDtNe)d=3jF7c=_{Q<8<2Qf9Q4w0~IP3V+ zx2K&gDdhLJ=-@c-xS(Mhui1Pj+u5As?184!f;6dc+~kq<6_raTlr_%TrpYw?bD<4& znby8UF-H!~AJb4hv|;w5y2|>-F%7fJ8s|3DZ`V3#9ew3Nw!WTVE%a}7nj++|BXpn> zCp67l(yHk~?(!drTA1Tc#{*v43%RS1O(C8Ar_u-^8_H#WC$B|mF@Nun7Axd#7wHrH z^ohbFNBo!H>G;#JH^3hm;Q#BN{3(9^&X@SpwaiQMvxFRVkw4eZ-$%&N|E2Hm_|x%t zfIddZrbkHU|H(hZ&o5u%AEu4Cq<^H4<1X@#@$-)ra{Pb!Cp!LgJQLtg@XMF^C;c=( zeefmvnd7y>OZtn1+~XpDiJ!kz$UXndU*Y)EaUj5-6yR_779F%Xj`JE?jpp`km2H;C zskA#n2dz%4*BYjeo7}6Stg%eU$%8MWtvBXt3yQ~09=oWqth}~zXjx;~c5S}4kcMgF zCj0L$>07KVy_EhkA@{l{Z-rl8vyglLSDrupfiM3ff9Vq2T5bK`$p*&dlIbe#8gE{! 
zcD0bxS{+~dLhQDl$t~KZzk~k=ezSI~AAXCFGh4OWgq-Ccyx1AqR==!PA!q+r)*X&N z9Uli&kcSI-gpfyW*FMya zY99%Cl#oXYd5o{Vy=`;+m)gny6Z9MHTkSg`j}!8EAy4q7cCbYa`bqos{{;O_`(67( z$diOTS;$jj2mDCj&upUN+uHLx_TIdm+Piv?E@|)UcKxz| z$Z0~JF65Cyo+0F!JM|7WUGJoKw&^xq$OYb6LM{|?v3HhtmanX{E!tPy*|y$e&_OqK z%ipDqbL%So@7Hb5ytn0hAr}RDRqvvAZEMTQZF(0PN)%;ELq*xLsItbW+&&pOQ4_{0 z-?Vk*e!aW5Z!4kRpuaa#kJX*tygT(cJzh_6+^F}^d+JF-E)()>Ay*2y(F z;Y;!en1-@El~!S4lstKXsTbeK$a zTp9d{evgo^5J7Ks;2F2vtD({ z`dG_r$MSXk$u`3^mkiJPPCIO=Ey_C%U4OpKdfg@KLBDnA)K~Pkym`CyL;9=wVf{7z zb^Q(fP5p?FuNQKQkZ%z3CL!M_WS@|467tQvaOv;p@8Tj-)PFdaZxQmXLb7>- zgnYAC5%O*OHfXk7TlH7ol2$+0%@;S%)W6lw_``jtf3N?bpVEKSPwPMFKMVPGA#WD) z79npHa;uQH33qP^eVz0?MpIjpLp6E z9ld$29cPGk&$JqygnZ}!=FJH4_T6TL8j6s23Hk1eQaT%&VS016b*$)EDcU_J+C9Im zW3>?`+8q$`U9kg~NBz6%MmMAT{|_A{KmH=QX~Y}7ym`Bf1S8StVe~YT z45yK7xP*M4kar9Dejz^~HPVc9Bg4ovvV^=>$j=D*u#jI9 z@^K-b6!IBi58d~jX|vmg4>Cr2^X@bT8$*ntM!qr37;cOZ@*_gtC*((k{Fsm*7xEKA ze)3M@a2aEWnX3|%@Xp{zT7a|PgMopzFRNuRk644r9+`nYRvJ6DKpBA z*+zv?X;c~2LVi}r`-S|Rke?Uw0U^I2W1>Wv0(u|JG|Cl3nwB_Y2e z%IzOBYyA%A>v zn;-QrJSOB%E;dYO<4NNgZ_YO3DdTA&9~1JY+d5uh>=*6c6!K@2M$N65)VOS7^+oc7 zf0G+`8L#+nF7lYhVdG72-ge_P<8|W=A%7|46GHxKyK%%{At8V53yZLo_5RQ}=FQt- z95p^NJ~loP@;5^MR>q+H=Bg_g_dhVGo`)a%jHqc%<#H$87R{tDTye)tnsFl$M^E9F>)p-u(AB+nvp> zW;bt6s~Ilj^Q~rtkT3i{axn|>v2sCG+3ZUjVD>PR{Ggt~ z-mcYSI)y!G+T?~s3;qe|ZKnDmDZ(zbnrXr=&!{V#Ti^R}63=H$R?XaA*Nm$1vz=GK)}S1!C5p|e?F7G2U+EbJi{O{Q6D zmi@b`73Lgo-gdLntTL;GT@iLw*d5z(nKh<6P%2H>jS|J@j<&6coNq4tyIssiVb?Da zFEN+;n<_qeg}L(YXT9kWcI$tGWv=q}-EOWn*O-?Jdzi3?3wxyRtr%Oh)J534`kG>F z3E|h9oBsVEH*Yk3!ro2TBYaQB*rLO4Gq?N)u+?l8_U^(S<@-IxmN0dvdA~PrmwBhT z%e>3H+q}oT*Syc%E$lJE9xLo|!X7W|3BsNz>^+3N=Pq349^yzaA2#=zkC^)c87Fxa zZ?UjX6!zJ|ULowYzJ;;2JJsjR1OAdeFYL}%^95l~_BrEhX+vK&5BZm05q4Lr`Kqw@ znliR*ZvDlL>1@7XzU|G~ZoX+AG2asQ-ol{-H|E$lhMo-6Eq?liwKzcs%z zzc*9NQ|6Dt-dEW3gnfjtj}dlB*vAU{IA3MF?Oye)`KS389n5p)dGmsWW%I=(*rs+I zD(wA*y`QiT_lX2s-)zh7E!l2eX0^9E2>SqGA0+I9kI=#DWOX)E3Px5`);G?rnp?R* z*ar%`6nOetp_bya5^ddswppsrNVLU;2jD#e82<-K|E*;OZio0lZguk_?8AgT 
z-`^)yR9oHuK2f#r(8e2W#rU31w1ox5(%`=^eWwy__Mmtx!S{Qj&1FmUCG@am2PIj( zeWg8YU52Juskp3kVIL{%qXH!z?f+P?J@v|mUH{i#s?1tYSyoX}U)i{%VS&3x&z9Yx zJ>&g_2prd~ZhAHobZe$Juhp6%>@$21 z^t2^+nq`&x7fOV^!1q;8TkM!htNQP;vgQbT(f`KEa(nx3vudq6VJ{Z;(u=Lr*_vl9 z{P(C>jn*PzpC#-izG+FeXwP!1`9FY_mPgpjguT3QNJIU?hT6(O3o6TE2QC=+uk)5^ zt+6iu*F(fwZ*90F?n+^=yeRG(zqo6y>x8{Z*ymmn*J9lm_MhNB>n35Z7WO&55T`9- z`0du#{{UL8ZNgq7?C!Sb|Io_vMb*`n3)(c+x19;!wsu-~(!ttg-9?3Uk9Dtafzvi7 zs?OhIOND*WyVh>&e(M2i4;`!rt%q#mtb1+at-ZcaoVM|vdSPEA>`T17WBxrP)}z*A z{vjbj*c*ktp>0IG#cc_mu%7Wx$+Ome>p5YcC+zcueZk*Ta&a`;hUPz0a42A1_>b3< z@7`ovhY|l6h<}Nt!ZyzT&)RFf)Hn9sP&QWT(T`W=wllE z7fJscq+`~nz7m(MN6=?9_%9NC54dc4&~fWa-$9ox(US?meIa-R1doT{=@48F!4(iZ z7lPdoTnE7mAh;2Nmq73}5WE+HpM&575PTGZKZf9A5d0Yge*wW?Lhx4*d=i4ch2ZZY z_!I=6hLH9U(h)*BgC`^eLR1LRA;f}^t`HIdAyE(#3n2**(houkAfypO1cclKA%`I3 zV+c71pI2V)Bu+rZcX#+_i?1;$fgya>iiU>pYHbuiw@>-`8JsEg82cMN5T9K%wNF*tj=JCfTe)t080m}60CZ#n!suXYYkXy!CDX2 z2Cz1Q)dJQguzX-`0c$5%hroIZtW#k94%S&e*Ez5*Kv-u83xTk32#bKQ?hqCYVX+Vv z4`GQAmH}Z|5S9aBeIP6k!umtlKnSaXuvHLt7lgeIVW*&r4qYOkOLt#Mnl0Qj5V{P8 zE<>TqFz7M@x=e&Flc9?nx->$U#n9zC=yEf3xfQzXgD%fPm*=3%N$BzubU6cERp{Ck zx^{!E+0b<$bR7&`z0mav=z1k|eHFUC16|*Pa1%V?-61?0!t)?JAHs)2_$UY;1L0F4 zd^&{Bhwz0Ez6iosLilP3zY4mjlQB5#1mR*2jVk-H)C0f>AMA|HmxMcT+XHbALEIw{cLL&7h#v^?4G_Nt z;-7%{mmvNXh(8SRuS5J1h<_X6zlQkJ5dSm8{|@nIA^tB&=l}_wAVGly2PEi_&;=60 zAt3@1oRH8T4++B|VG1NngM>mzD29X*NT`5>DoB_E3G*T0YDl;d5^jQoTOr|gNZ1Vt z4?x0$knk`hJOYU_B!)m@I3z|uVjLtUKw=L_OoGHzNKA*sOi0Z3K;i&MtboK?NL&wz z8zHd;5^sdWn;`KHNW2pgABIGKfJY(maY%d;5}$^|XCd)0B)$%bZ$jc*koXQHz6XgP zK;n7m(G_|Of*#e-<7((}AM|(xdIUcTJsyJ|&q0p^zNQ@8LC<}VbU!5Rfux5ZX)h!_ z3Q12u($kQ%ACeA0(m_ai1(FU!(i@QU79_n3NgqJcN04+3l0Ju|FCpn`Nct9%et@LY zkaPx;euJd5kaP~5fHMf3cJMemfU`3=L&50)rvc6|aE6035}eWCj00yPIFrEX0%r<1 z)4`bq&RlTjfpY*j2ZJ*ooFl+F8l2<6IT4&wz&Rb91>h_OXDK*mgR=?`&bi>M1!n^| z7l3mSIG2L637jjzDZse~oa?~30i0KZ^Ez<0fb&Lh-VDy$z_|sS+rW7TICp{b9&qjk z=N@oA49oulf%92#J`c_pJ>Yy9oUelOb#NX5=R4qhADl@8YItzS zz5~6zl_50*QWX!RWs9^DfAI5HcTz%y%L4 
z7-W71nP(v@2(o0z>IGTZkd+Hr6CtYrvWg&UA!Mzjx(TvcA!|Ei9fGX4A?sbp zIt5w3L)KZy4u|YG$WDOlQII_uvZq4!V(>rNvjt?|3fVg#dnaT+2ib=p`!HmmhHO8_ zUyu_9Inj_43pqnQkTVu?#zW2$$niqXD#*D5a&|+`1Ca9u< z9R#^UAa^$8)A(C;DW_YU;?1p0jn{o6tRPS8IX`uBkTsn9x3x|4A6o83q_Iz=8pNV89R;U`RL& ziGU#`Fk}u4sevJPz>wWAn9@F!sSuQ2=qjIhCo@i1Zrj3|H+TVcdq zFybB(OmOw4UGC3M*RY#b71r!7(E0=uZGcA!RTvX^lLEsJsAA~j0uG?VKAmE zjF}B%YGF)0jClmcJOg9)!`Mg|n+Rij!q|B*b{ULq^1#^VVeG3g_B9x1!np1*E*i#7 zhjFDat{lca1mm8BaZkf|8ODdecm>86!1&oPz7odohVlDg{9`cw989pog!V9DI!q{q z3FRVDfsHya6U3g~`WZ@(Gv{2~!ebN>A`i znGaKz!;}>;YSs(|W_S9GKPzrZvN~ zwJ>cxOnVcieF)P&g6Z91dJmYM1k>wa`eK;A6sGTk>CfU}`g1VD1~WRsjLtA)1k9KS zGbY1~7MO7x%-9SwK7<)xz>F_pW*p3P!OY$;b3V*m4l`H4%okzi>oD_8D2RZ91SseM z1#_Wb0TeVs!2?k66cjuI1UjxNEq4<6%-UG#l zp!jVleiw>=g5p1+_#DiNgIO+^)f;Awfmu^w)^wP)0A@A8tY($@Ngu0wp&_aH~3d&AG*|$*kJs!$VLD^|2`x(lC@*pU; zLwS2B?+E3cp*#f2Ehz5-<-MRh70NT9JR8dUKzTnX9|+|`pnMpVkA(6uP(BsPr$c!I zlrM$yesQd~lzk$l%pz;r>JP)2K8&pY9)d8wHK~)G;b%UzjP?ZB! z`A{_+s>VRoc&M5LRnwtrCR7zcRV7pjsM-KkS3%XaP<1_2ZHKCzP_+xH?t!Y^P;~*S z+e5Vh)fQAo;-NYUs$-!#0jgb4odVVAP@M(U{h+!Ws#ikwdZ^wA)h$qcBUIlE)jOej z7gXPMma38;P=s`o?n0jNF*)o(-fyHNcBR3C-vkD>ZgsQw)0wDZ86NSKokbKEdz zBg}aO<~##)_QRa#V9p_!a~NtSLCsXCnE^G0P%{f^%AlqKYO0~825Rb{W**cmgqp=r zvkYohK#d1#Rzc0>P_rItu7sLv{OcQ`<_4(oLCq~tb34>*g_`Y9vlD9Wf|`4w=6~a!F?ULTfluIxNipcZQ$Ml?rq?{1Khj7 zeGj;Id%(R1+z*3$AGjX}_fz107TnK+`$cfS4DMIK{W`dhfcqVAzYp%C;Qj>MpMm>0 zxW5ATH{kvr+&_Z*XK?=t?mxi&7q~A#Z9Axyp|(BLc7j?@2-K=jt3#~?wOye$0&1h6 zHWq3VptdK}CPQs+s7-^~OsLI)+P+ZRA8H3d?NF#44z;78b}ZCRfZEAWI}K`QLTwS$ zmOyPe)K)_69H@1Jr?wty=R<8H)GmSAVAZJ73#Y}eK)A@1NB3oJ|F5EpnfUTFNgZ4p#DXue+lZ(K>azW zzW@yx(9jPW20%kOG`OLm4jMK=!)9pM>Vbv>&~O+UUWbPBFz+&$*8%3Gz`R_T*B9nZ zg?Y1JUMbA;z`XS^Zv)J`3+6ot^B#tIM_}GjnD;TvZx8cTn6JV7Y?wa~<`0JXl`y{^ z=FfxqH^Y3-HkiKy=I@93ufY6QVg5;&{}aqV0}BjT5D5#SU_mzc|69s}!LVQ!EU1PB zb78?oSa1t0xD6H@fCYzP!RxT#TUg-d_!SmLz`_Jr*aH?0frVpW;W+Rttc8V(VBr#2 zxCs_+hJ{;U;WM!CC0O_hEc_Z4o`!`#L!%Cj5zyEj8grp>FfK9$5SrEItoQVql3AmbhTaI9M_rmdu1D3t`C$Sh5n9+yYCs!;(8-$;+_h 
z2rPLUmi!1y{(vQa!qRTAG#-{F!qV}ubOtOffThi_bS*4h4@-B!(mk-$^AIfk43>Ta zOTU9<3M}ga%few%#Yfu>iX=~rmF04r>;q9?3KgB2ODVkWF8hZPmDVgsybffbuz z#XeZ^EUb7A56w1c?g-7Dp?NekPl4uX(CmfgE1>yGXuc1cAA#maq4`5-{sNl6gq6U` z4zRKltc-`1ytXvH%uY#4=z{*Ep~A-o4;`8obYf07M56 zoj}Y4Q4XR4!~-B61Mvi`@^pYz4p^nbsuEZ=2UgX%VKL%?pSQ`avV_cHp6htyeBOA5H(un8mw96eZ&dKcYWn2S zrx$(Bq0htgd5k_!&}RaDX42;q`s}Aq9eo<;dmeo+rSIkRP0;rZ`u3&o=k)!CzRP(t zoj1?o%`OSvyo)y<;LQhlb0lw0;>{_%S;d<*yje@X%jow@`dv%Ex9K;KeuL@PK)=(x z6})vlZ{5OMzvHc;yfvJ+M)KAn-fHBn?|J*jynP98U&`BW@^<0_-X6f)TX=g9Z&&lq z`Mh%(?_9wXh-$4JD85BBrnWqkNc z2_Ig|hr{@AJReSEKywD9GoU>KZezeb47is8V;C@%0n-`K$UqEi!oX`7_!|b^%)oaT zIEaBm7`TCfI~ll}K{*U6WKb^#{hdKiFz6`;O-(RpE`#PX=mdkCGPpT|Z(#7P4E{ZX zKVtAG29IHIJ%dj%1Vet#kY6$6Muxn>koOqUpCQv3vXCK*81e-}zGO%RLsm0n8$)(7 zq>>@k3`y)~$RUP&&yW)gInB@}3~kEL77V?Fq4zWNeTFV%=n;zBP@G9|7RA{V=TUq% z#RU{!PVqGqUq|tc6#tguTPgk%#rIMCAjJ<;{0PPWCQ`6&1(g^vpP=o&tHB*91T^U+j3TE|D*`DiDl4^jFz zO8-IWzbJj2(x)hWmeT+7L*7{W5~Z(F`Ua(MQu;Qf?@{^zr2{D)LTL%5rIZe%bR?x? zC>>AfBuc0LfHQ;APbi&B>8F$~r1T3)7gM^F(&dz{q;wUfYbo78>1Il|QThYXN=mCK z-B0NuN{>)lM`R$li{!f``;#0%axlqak{^>SBRPWPXp-YdP9!;n zL2@_ADw6w19wb>q@+ip$ zlHZd&LGm^jPRMcGZ1-AviLdaKU4M>%Kl2(-zobiW&ft^ z3Cf

_3$KkFpmidzrG=DCxGb8`T$j2G^BqKjy zWC2M*o-5&og=?qbD(X3ZpkLdMBfI zGX`T?GNu({&S%VJjJbj_e_+ggjJcmNFEXYtWBM^>EMumT7&C)0s~NL}G20k(lCjMg z+k&x$j6I*RKVj@|7<)To?_lhUjP1+VevF;S*jbF7&Daf$-O1S9jBCrdPK-N?aX)6< zC5*e2aSt=@F~&W?xB(KzeayHd;}$V)8RNcXTovPL7+1^qE{xA-d=JL|jPcho{szY1 z%lL;G|98f}%J{b#|1RS{Vf;eIFJk;=##b`FiU}>4(2fabGT~w-T*ZW|6HK_D36C)0 zpG`~b3-iA|ZS(5pW$H4fu3_prrgdamHq&yMb}!Q&mN4z_O#6^&A2F?zX`eIg8>TI1+F_=B z$FyTi&tQ5trspvIQl|fc>DMs*5vD)M^rx9Vi0MhD4`cc_Okd6PwM_q>8BLgx%8Z`O zIF}jcGvfz|bjBZ?Dv^Hl-VWB-ofmB%s#-JpD^bN=3L2~|1sw^=Dfk2ubETMoK?(C zXYN_d?ZVtUnEPkuKET|OB<4zLP@c^5M8V&?sedCxNMIp)n}-WSaK ziust|lKHKee+%>PWd0wSU&8#6%pcACUCckg{6lFg_p8 z=M!1fibWk*)R9GZv*8o396w_^mUduiN0#2h(uY|3FiS_UbRtV9v$US2r}zfn z+{`z3@XcL(^9kQ9$I3sk@;+9UuyQ0TN3(JXD=S#Jnw9HVxsjE- zSXoJVP~MjEbjsUN-k$Po%5y1yp7IwdPrO3;>y-DU{4L7grMy4o11KL%c`@Z5Q(i{- z2+BuOK92H;#(@psaQ?LIx03&v4x85RP3T+4;6caw`c+oH&FXhqy^PfxSiOnWiLI>O&g#9a-p`uutT~r8=dJ*6U0I*a`UhD5Z`MD~`lnd`4C`NH{mT;8FJ=8Y)^A|_X4Y?IeHH8X zvZ0U-7qQ_IHeAMrE7))?8?I+Vf(`xH@HQLXV?%#76tm$YHtc4@5jGrULjxPWV`H!} zg^ic8@j5o%z{VRBZ2S!yZ)f8jY%F8rBsNZA<8(I8WaDRST*#(WHnn5ZnQZFFrp|22 zWm9)H{eew?Vbeow`Wu@bVbhardYVo1*tCRA->~UhHmzjS1~zSC^AC;Z&Dm_uWph58 zd$9S(Y(9_8PqO(XHowB=*V){M&F{1M12!*Z^Ex(fVDn}+Z)I~8oArdHw z8C!qG)~nch16zO1)>qj29$WjfbpTrju{FuoVQf9gwlmn)nr&%pYsa=swq>#H_Y$_< z$F}>~_8{B-%C>*A?QyovW!u+mTgtZOY+J#$^=#Y7_6}^%WqWtF_h5Sg+s|YB1#Ewj z?QgUFUAFgU`-g1*i0!3puVwpbb_6?8+0iV)jx=_(W5+G*xQ8A0vg3Ys{DmF=WXHeQ zF_InA*fE10pRi*NI~K9yOLk_kGmoA5?Ci<8J^k48 zA$v;LGnPH$*)xMZpRi{xdp>2)LiQ|W&vN#xWX~%0tYyzO_EfXy2zyRZg{qcRwWcbK zs`gZ6P}Pm9T&nV^O7x_vh^h;yx}2(OsrnsNw^4NuRrgW#096lB^*5>>r|K!Ho~7!) 
zKLAkm22~$Y^)XfBshUXDEUM;EHJ_>lR4t-v8C5H&s-S8ORqIJq?VxHORY$2tbrY&v zQQem6c2swux)aqoRClMk2i1jC_on(ns((iHuc^L?>i?tq4yyl1^`EHzGu3~m`kz$) zo9ZX1ewykRsP04c5A}cbAgYs852tz*)nloiK=n+jXHz|o>d&bDoa%3=UQP85s&`R+ zfa=3k*HT?i^>^$I_NKD8IeX7wZyWX&u=ls@y_>zSviEKF4rK4}1bauacM^N2vUdi1 zKVk1&_I|^;KXW9&W2KI}_jUo-Zlv9CS*GT7IdeO=gh z5&Q0D-{0BSpMAyb8^gXI^xu68*!Ma6zG2_D>?>#AYWA&T-*)!xV&5M2?PcEq_BUt$ zkJx`X`yXKcKiK~)`(I}NyX=3T{X^OR5&M(uAI|A*+- z6^LoaY>6o+PTXbXq-aOfz9PICB+fph;A*81gPP9NbfKmjHM!IjQF9J8=aQ(ofSQY_xssae zskw!kd#JgWnun=*gqlaGd5oGTsd=877pZxLn%Al6OHF@jim4ev%~WcpQ!}5M1=K8} z<|}HJP*Xw88fw;4vx%Cm5^AcbspCj+B!wfbIg-Ya_8iIJNN0}ZawMN4Jvmauk#jh5 z5l61($ger_2aepyk^4FFAV(hN$Riwilq1h@j=EzqZS)Jg>32K{C z+n(BNYICVAqV^nW&!zSPYA>SpN@}mB_8Mxhr}o#>{+`-K2~+83#Ph1%Dt z?Mv-j)DEC_Ftx?heoSo{wd1Hw%%XMywackpLG5~KH&MHl+8xyHruHDUHPjxZwt?F3 zIogDyX&lYuXitt7a`b$TUdYi)IC>dJf5y@4IQlD&-o(+HIr=+}{*$90a&#Dpqu+3J zHAi=F^Z-YH06$J$Ds|1NJA=A5)TL8*7Ij(FWmA_&-PzRrh`LLu`z3X^Q1?6P{z%=Q zsQWW@f1&QL)cu>fC#ZXxy8lr3Kk8nSQ1?D{L#Z20-B{|TQ8$yi+0@OW?lbDXrtTZ+ zzNN05y4BQep{|O$Bh;Ou9`!A$Z%us~_3f$8puQXRxzy)V-;??x>Mx-FD(Ziop#Bfk z-%0)b)IUi5!_+@Q{iD=BL;Z8qKTrLO)W1UgThtGr{$uLLQ$LaVS=7&=em?aJs9!|= zGU``QUqSsE>eo}hgZh2cAEf~ei6%6(qM z!y+2KqG1IM6*R1&VLc6-XxL3d4GqU=Y))fK8r##DL1SkcyU^H;#-215(RdDx=hAoq zjhEAS4UMDEnX?%>vCuw|!#^-2!p2pW{e3!<-G>)WkG>ube zoKE8`8t2eBpT@6fTteeA8duO*LE|fz#Ic?n`w7P`;@D3)b~(qcB zaqMA^J;Jd^Iraj_26L>KV;^&@jAO$&Hi2U^Iktvln>luze9N@$u zP8{Jx9VeS|vIQqwak4EZ(>Ynp$q}5K%E{@RT+YdroLt4pwVd3*$<3VH#>t(WtmI@h zC--ym5GRjt>I_b`;Z!1>Q)hCjBd5;dR2HYQIhDt$vpH43sa~A=5vMNW)D@h%ic?o} z>S<2B!l~Cd)rV7Wa%usmmUC(arz$wLn$s;g-I3FsIo*ZRT{(R#r~gFa^nILufYT3h zdMKwyb9yYNCvbWar+0Ju2&a#7x`ET*3Fiox3YQC43O^T~5?&Tw72Xi~3ZDtfgl~m% zVU>s$A~HmD5|JqpktO055%-9=SH%4y{vu+Ch*2WOh!`(ol8C7yW{4;ku~Eck5nDv; z6LCPqVG*?=>P37fnsg9Nx{4;*qDh`;a<*u4sc3SOXmYz~az{cmxl=T`TQs>>G`U|i z`Il(&ifA%WG#MhA6pJP!M3X6^$xPAYbJ64*(PX!1vR|a6ij+)|(o>`qij>|W<;NoB ze35dYNcovaxk{w`LZnKTgBrts?asu=8C38qUj}~>6N1CwW8_oMAO?u(?5u&cMZJmxwtq{CYp{AO-GBS 
z<3!VmqUl`GbiQc1Kr~$>ntmmkE)h*DMblc*v`#c_6ittbrYA)+qFEc!EKM|PFPdeD zW}QW|E~44RqS;lV+0~-iHKN(|qS@V|*`GwSdqtwz{i4}}qS?cu*;As~v!dB^qS+gw zSzpoYEz#^<(X78{Hb68RE}Bgg&E|?`^F_1IMYAtOv#&+7<)Yb2(QK7yRwbI97R~cT z^IoF)C8GJIqWRB7^IwYQ*NNu8l8ENFh~~c+&2JaY?-b4763s`8<_kshrK0&V(R__) zzFsunB${s(&3B0AheY!l(Y#SKKPH-=6fKAr%|(kdM2j|}MY?G5Q_FIr}ZmYqe*T+y;Xv^-C={E2AwfoPQ!t%iwKlSQj(qSZ{%DluEM znkQO)CR%+iTCEVRDnzT*qSZRlYNKeiS+v?JTBnHC1)}w>qV<18>sLkVw?*p#qV>n3 zb(v^APPCpXTF(-#mx|WQMC&!8^*YgdqiDTZwB9RP?-#9)iA3v@q7Bg|MYL%q+O!mH zT8lPmqD?o^CRen{7j1fqHbtV%Iik(EqRj=O%~hh!)uPQcqRn-p&CR0Ct)fjI(Pn^X zGf1=mET%^As(q9tkuZ#4)BK<9q{=P{6P^1qM=}D13R;14o z>9a-pry_l!NdH2lFA?d>MEVMmzFws75b66w`T@}{M7vaxXxCh{J43Wz&7$2l(Qb!mw@VXg^%EA0^t4740X8_LD{XX`=lXqWxmgeyM1`T(n;)+E?iCsLi;M?F#=|1xF_H0v$aq#{{P%~0BBP(k zcw1!j7a0RY#$b{0k;q7jjNu|3WNZ={)goh` z$T%)CPKk~oI;M(_%|*vEM8`IwW4h>grs$Y2I`$MDi$uqBM8|VQ#|uQqi$uqtijJ3y zj(-##6QW~((Q%sSSRpz#icXoL(L)r)6`fX#PF13FE77??biPq^eq40^uju@| z=$sIp-w>Vqiq3C}&VxkfA)@mb(Rut2o0CN6siN}?(fJe6d9LXEsp!01bY3AkSBTDQ zMCbLQ^Cr=GtLVH#oYh>M)k~apvpDN9aaOT7Yo0i3mpE&`IIB)%hR946nQcU7XOY=e zWEP0bB9VE4$V^-$GOrYwzZ02vip+nC%x6VrvB(@FGRKL`PetZJk@SlT)uPK@(WO>&IW4l9iLBNlD??;;7FlPBtUQsGFS2@y ztRj(hfyla0WL+Y%E)!YTi>zOXtlx^PTSeAwBI{0(b+<%h-7B*07g-OAtiOt^e~YXq zMAnlc>v@s&qR4tlWW6J@-V<4aMAlG|^^wR*imcHhYn;fMAhITltZzitPSLfA=$a@0)*Ba6Fr09BDbZa8IH5J`jh;FS!x6Yzlrs&p1 zbjuRm3PrbGqFdrp(d`P+?JCjj7oyv>qT3Ck+l`{zZ$-CzM7R4yw+BSGheWr(iEjT8 z-To!IJubSvB)W|f-DZn!D@C`%BKu5{eYMEGOJx65WdBWM|3hT|OJqMTvY!&!Z;0#< z#c#3)itHgGyF_G{itJ$`dxFTGBeLg-?9W8@=OX({k^QyEULmqKi|nl;dxyy0EwZab z_CAq)bl~wnIv+ih@9ynkuyu= z%n>>BMb1)@vrObH7dhXGoJ}HUi^$n6a;ikmK9O@!RM zh`e8kyqiSc%_8p}k@t|udmUD-KU7|(?s`~qWf&oeV*w4ndn|6y4Q*B4Wj$^qWcNa z{j|t$BJ!Jy{0<_&qsZ?n@^eIfcah&iDI$M?$X_J#zY_UNME)|7ze40!i2OAof49i568ZZ?{y~vnBl3@m{05Q# zy*Rs>I6GIIeU3Q$=i=|&6g?@5o)JYailUcA(Ho-ZO;Pl=C>kn?N<>ka zC>kM(MvI~eqG+-x`k|&RnlFmJ5=E;-(Hc>-Q50=w(QB>fwL$dSBzo0| zUWuckSA*#Fz36p9^g1PaHy6EIir(p>_nD%1N74H%(K}D{K3nuI5WUY6y?-KlUo3iG 
zC3^ot^uAW~zFG9XTlD^u=>2EW`!AyR^P=|)qW4Rp_p74!8>07nqIZ9BYwrP~_h8Yx zO!QtRdY6md$Hh6R;+!IJ&X2`8w~2Ee66ZW3&Ux(r{boRFlmE|l+VlU}|9{0fPy9cj Cg=k3t delta 33196 zcmaI62Y3`^_s0EOGPBeqGdpDyQb-SEvmu>?gx-5^DWs4By9t31I%M|&q=|Kb*p;Fn zV&O$WQAETFDq$InOg^ChWb%y8jmI^=8n^ z_E&BQ?_p`$H>h6+&*9J~Lyv?$75Y->%b~A?o(z32^!?BeLO%>W7y5PR`Ou$3e+m6n zu`2DA4vMZAimBL@FeO~+sw64NN_Qn&$x(8ZUP_*luk=<5lwrznWrQ+P8KsO?#wcTz zBBfXsy`D$R;Vx!R*_Qm#?1Rc=vkRc=#mSME^mRPIu? zDfcT6C=V(RDf^WFDEpO%m8X=W%G1g*<)reea!Pqk`B3>t`B?cx`9}Fx`A)f@{I2|= zTv7g1JF8vPU^QIrsz#`hYIn7VnxZ(uMj+tl0DJJdVX`_#Sa{pthi zA@yQCy=>Mu6gX0^4qX*S(v*kWyo zw(ho6TTfe-E!WmN)7Hl}$TrwE+&0!W&NkjQ#a3)9v6b7ZZ8f%9TfJ?*ZINxUZHcYf z=CQ4|t+#EkZS*-i_4IsV`_y*E_OwWaT`ape)Z6Z_1*ej{XyNcUq7H9(huuL z^=I^V^mp~s`g{8O`Um=l`bYZ5`X~As{TKaL{Wtx0{SW<${-=RqG1?g&j9|ksOe5Op zW@H)JMvjqd^fK~{e51EfV2m-w8sm)d#sp)cG0B*0Of!m%I-}m0Ycv@1jQK{RvA}3D z78}cr6&|D6SZ%B^ZZU2(ZZmE-?lA5&?lQI++l?K@PGgsGpYedP&v@84XdE@3HjZia z#xusN#wp`9<8|W=&Gu-TE z#+b2YvgtJQ&E95#ImjGrjx@)bp6TWcv&bwqE6i%M#;h|N&1L3tbA`Fayvkf_UTa=w zUT^L+?>6r-cbU7*d(A!Oedb>CKjwb(G4qi5jQOnjocX+Y!u-Je(EP~!*!;x&)cnl+ z+&p7`XI?Nbo4=Y@>=wIZm+c+xUF?cox5wCH?Q!;adxE{YJ$1a@X)mxB+WXl1+WXo2 z+lShR*~i$&+9%s**emRn_E~nfz0O{5pKo7mUtwQkzskPWey#mF`}Ovl?Az_T?GM=Z z*&ntav_EQp!hY2Lto=n_PVfNBjlRXfH!FEUE)a5mA7bAv7t#?S9TU>? 
zLOLO&lR|p!bvjZ`1(hL@?7#HdQ#NbSx0{{zNA^E{Cd88J5>lj)qJ(4-Qm6O<2c~{8 z&~?qC>C4koV{&@stn@o~qBCab{?JE#mhZ#*ggzE}2xsUMLh2@@n1Ekw{D1?~7r(S| zZmVCf^vsnrmgnSWrpEM2{|Cdt3wJ7=l`cxK5+bB*A>{}u zS4h2fDvF}EqVt55?`!_v>s$9#*Puuxwv{YSiElL*2&vF_{HwU0DN0X2nL|lcoJyLI z`U(32C5^1_d%2?7Qak<-V=wl6&@12KxQ_D*crH$^an^71A&v z4Hwdgoys6(FivHtkVXn=RKRa^z_0e}q@LrIDSp2R%0y+7GFeDtg)~k`S!Wsc&)sniK+s*t7yK-1#~94NZ<$L|*omMqW9 z&B#xU$xP49S~+8RMtWvOYRo^@QfX8c`zk&U)0HL4Qk=?iAr%R!IKWckJ99oUNGNOl zVRU7kvL2_hQAjg|R2Be~`|`g@>3O}f#jpPc*a`%l$4f=vn1>Yf2}SmvAbt2x*a!76$-Jd>g+@ z3VK6%*Z0wxu>Q(v1AhvNg(A$~m0Mc_B3m$rB*)`f@KM23=HsYSsC(@(WJoHzBPO(&_+Ujc?kfQSygV~A%U^@MUMr_B&&$vMpWaai zsl)v;`;AaX;#5Zq={6zV9w52H-+O#ETp@Hv@Jlg-S^8yN6!ki#;?9oouyW()k4}Sq`QT5kC1llRBP4Q zI8~RBb_?m=fX^PkdfzKQbPsA$m$niwQ1e?IbfChUK964N`{Yt&&@1Zet&!eP-^8iDEu?3J z^lSk6oGPW6(IUJ}yF z0sAX~YD{-uwYqqDZgxg$Oh#^2_R6B=8ND*HQ)6=T|FM|A+3>3fu~}^GTAT1yA)WF$ ze~Jt0XbbW0Igc&WrnH(~7t$Mn0RFp`^R+!KUu_NCt#UIm+9+*yTco#er!CADZtH4` z5Yk&hdPhj7h4lVTTa+!@*3A|pqz{F3Mo3=^>B8UlNLxHr-mXG=+skWoq=s5svNycU zck!n!VJWSqciT*9-taO-_`WZX_8tDYUdw0&yw?WE@%8y-gqGKO{e!mag}(28n&>P4 zqcGy{8?3FLZGg9Mo2|c)KH6p*D5Q^lQNJgI4fU37vkeo{CvR|$j-0oRu#L2hvW@mN z{~FctGa-F0q))x2zVCn9WQ!kgU{tDn!{wUSaN)L1uub&6|7$mShi#H=vhVb-1Jr4@ z8U6^5ot*q%YedRM{F_7|jU-=*7A~)G?@)q7_yT*2{?K<1_wi|3W z+I+Unwk@`;wie&3f5cku^ZocoLSl@NV}+b394%ue=SjeIN8g={JTXt7l@0_)L<;Q#}WObYEoRDpPOysxz zI({c)?M=?vzPDZU_YYN}rM649%Po2BEGZ$szD`HmFScK8zgfoFes7uA&f-XyTifS~ z?a$(|lSa&~s$Md_qG|Sa4I2J=A%N1_X+bR~+F2rkBu#Gls-304(?RR>w_L5Wki-7l z8Cr<9Y?~G;RCRNB=ukvhA|%kwC0&fmm1m87tKvt5aL7 zo?1p*RsEH3Gv#Q#{x)fOLXP_@%K~rNHjh>)dTbSwq|MhL$Y)#bt>&%qodZK&C}*j z8#^hjs-mev$Y}%H)7m-u|r#+Eu>-U*h&6xZCaOT%lu2e zRLJSA8=y=H_w}z1_q0Jgn((I+*dy91A!q)T&Q<<&)(Sc6-|1|$UADa*NGCgxj(^$z z+6mfq{%Un;qrXwx+(y4e$hiUf;e4UpM$F(OqZ4mNcA(ysHYj0^BcFbF_Q+q~xR(no+UdTg)JX*-(g*>?} zpX1sqex?)JOWMmq9xCKvLLRhDZJ7z}3!ls7$il&NQZSl@(U;keu=e2LNZ-qQT$PrjWPZjcTAx{(X z^n0``7F|cTSagdnYb4vGk%P6uC=|6>bkdJ8=>Byzf;x2^cZj9 zEcY@J#M4|$ds-yQFc>#XOt-c+i+ 
z_^y9rNN2sE^?R)spZZCx&udeCf5uo_JRufar~a+BIbM9--WeOB4{81G_EM${^6jhX z?Yr~!;Fi9j7Hh;veT+A6yFN-EE#!J3&)u$%)yE0BLCEu3%0ewYBBtsy{D5itbRjni zdBJwQNG}$0laLp-0xI=tKVX(0us8sy(QAdgM9529iWN&-M7=)mUx4{~qmY*ic|}Ww zVu_4ctS|EemiQT(0}RXc6+-q1*&ASJ>91JY|5L$QeVve333+wPO@9AP`gQ-3c)fmu zkgpQ*+Ln`wrR$`v-hw@Pi|so7X8jiZ*5FU|+k-z0J`(({kkiWJ`!!>P&M|`pGW%}lR z5Tfgkwi>T%Gd|HWz-Gzt-T8^tcl^^Xx_+b;aYGy8m>)5C%5(ZjZ{cqJdHn_bMg6#b zLVrnrS${>yJ|S-w@)jX)6>^J^ZxZs&LcV1;PW_br8css>S4hy`_7B?ftwO#{NEUC9 zkZ*ew@prg{tR`m0tmyajDjqETX0`NNbNGmSE%+^8@rjafoID&(hy zd`!sC2>DqdKPTkpcaem%Q>`)Em}9sMx3|zPIq@!i5 zv8ZJ1q=^-CYpVw}ES%dUiJbGLDC$=`Fx zr0S*}#%^N|4O7NWnpE8sD5P<>aeph`|BQ3gc<8SR8+RM~|27zR8wdPf+M4Du+f7)eS!fE_u{EQQ)@tcr;4ygS_SljtONETu3 zICI3{AuWreEiZR4miyn1WaYGs>1GM@Sj`UJylrNCA^*C~>?q{l{(llNgUwKH;Wjfw z$iHth6(RpIW6s>B>c*OiS#8;xx@r1BhLEppGwnkDbLymqg^mA2M40|(2s2Vx@oY1r zh1D{>zGBYYv|01!jsL42UCekhu}xW$u(sXg%^s$sO-HJ*`bX{7*W_kTGuvCZ(=^Oz zGtiZ{c<` z+UyrVckqAo%N173)H(GPwQblg=1_BZo7@q?+V&h{jyA{q&&rzP&B@-v9p(gcVuU$K zSi1;ou&{>gz-dl31FwRup~9-R{1IbmjxIIJ{_YsFTv(Mhd9%zae_tg|m~FcLKC4Z) zuxkH3f6Tewf*odqInSIgth%t;g|+Kn?p@3#bMgNk!R8WksjwQtYPN*NS&|}}P4O?l zDs#24h6!tU%lJ4;Y|D?a7P}N7tdT97;w(v1ZZvQ87Vb8E=4Nw?xz%hjZ!&K-ZxPmL zVeKZYF~S-vtZ~8`FRTf|nz$RMc{>RtnRl7n%q7qjuzIL!dfP*wJq)A zExXhQ%!m9d{-CfXZ!`A^YxkBT@s`ZNkC+Gjmk$VQk8S3o!kRL9OvRkJZ7DusKIP5Z zVIDT0G>-_YLs(OV)w#nwN|Jd@SkwHGMz-W6SgOij^p51-;CR z-m)F$59W{NC1EWP);_}8_cc12Kbb$9kwqh_s^>P%shLyVD6ECT+WI(W{$~E(a$l0A zThMm%kCuCrED3`GaKn$0ul(2B9u#ot_s@MBc&$a1y}iAI7hxSJtONY*Q$wx2^Iwxw zhq%C5Z4b7GlquTasgwPW`t{Xwn?^Uxs%Vw8foli4Icjww6uF7N<6uinnZ=-65=_TJG#_iOuV2&+!)CZBMsn*fZ@}_H1DtBdlYEb)2w{7uE@P8&BGM z+4C&Ay|<7*7uJcwI@vo@Sf{r9+}$#<^FaGxZ{arkAYq-MhuApJtyftkZ?H_^;OKVlTGO{NJ5pFSC~m>kMHn zYWb~)B{rhkKKoyQId+$@mI!NUORp45H&26o-v68#_67EZZ5kE{Yx!Rqmijd;vo9Cc z3Sq5k)6i`H*Y35ivac4_N@1NfW5nPgL#pO9H8jTeUv}63PGl=068H`zHG} z!dfk?H7#GKSfWC2u-`~WyU)IvD*IM@ON-rM867j*-#iP1b>3U{o9(yQZ?)e>NBiye zJ1k@EEtYZiyINK|EaNgwBCbH-yfG(D`@>ZGzATAoOhr{Q;CHP*Oli1*H!t{XiK2${;>fkP_BU5 
z9#k8s5uiFj%?7m(s6#;=2kI11i$E<0wI0+4Q0IfX0Mr$rHiLQ!)DJ=Z7}QTe{T$S9 zK>ZGEy}&jYY(v2|9BiJEV4DQCDPY?Hwuiv>AFw?PwgX^00=A=|bpb6LvV_A_X|f*u6A4Ek{J=u<$S26`pvRiM{^ zJ{xow=yjkk27M{$%RyfWx(9Rt`aaO#2ct6>y}@vSu@(#;7+b)&35;97xE+i;!PpPR z6JR_E#xXFS1><=<7;k{_78vh=@c|egf$=#QUx4u?7?;892xccRH83N=>;YyPm<3?= z0dpXjgTWjM<|r`7fH@A#>0nlXIS742$6z2H_(hyaK{q z5MBr2Ya#qP2)_Zs?}zXM5dJ8He+1!QLijo8+8(+p(A5TAozOKGy5>RGMbOm)T?KS~ z8oHi;RFSAhHWYhCrkOk>L;-0g+J<*$pCNA#wsl zuE0a&eGvIBME(F#KvX-3k|C-+M0J9wE)W$1Q9U5a0a57?l?hSV5H$dz20_$th#Cn| zqakVnL`{OIDG)Uaq834v2ckAX)U^=hgQzVK)dEqs`3-kK)LjsDFGT$Zq8^5*gAnx? zL>-5ymmumDh!S zbXx)4_CmK8pxc|!?QQ6G8oIp?-9CbDpFp=u(CrHN-}c1F5YrxFIzo&IF=60|iGrAJ z5EBbANf6T=Vp1R`4`N0^%w&kEfS6ejGaF)D5K{*+^C4yd#4LoEW{9~3Vs=2x-4L@I zV)j7H;}CNgVvaz}QHVJPv7I4ShuBz%jmJZ53dE*DY#PL-Lu@aI&4<_mi0uQhgCTYf z#9jrlS3~S(h}{aYH$&{L5W5><_dx7Hh8pOTR5X5(Z_z;NCf%v`<-yh;jA-)FUXG8pIh~EV9*FwDKQHVbZ@y8$`2okzL zLI@=EgoHdu=nV-|AYmpXltV%VB+P|`d5|z45*i`F0|^2WHbKI*kZ?UD+z1JqAz>>d zY=eXykZ?C7?1F@QAz?2hybqql_K=thiPey}1rql`;**g06eJ#l#AhM#1xP#&i627Z zSxEc}62FDS3y^pb690rG3nW<~sRJZ+f}~JLQXxr$q*zGGhNM1_G!l|VL(&9DnuLd> zDUehINhOdp6Ow8nX*DEmf~0F9=>|yhLDEi0x(AYWL((2d+6zg)L9ztNDkN)=+!c}| zAvqe7V<0&hl6ye11CpJPoCC>4kX#AL&5*p#1IZg8c@rdG3(2=a^6ikk2a@+f@&l0k z5G4Nxk{^cT1CV?SlAnd-=OOt;NIn6{FGKQ4Nd6JJE6}|cbT5VOtD*Z2=)M=aKLFhy zgzk?*_d}2}08)YnLCO$F83rjMTDs&}p7%TeDf=MhVMsX$DSqr>NO=lUjzP+Ekn$p= zyaXvHA>}nlc@t9Jft2?k4pP2>lnapZ1EgGrlwTm_cS!jY9PPj% zgQEjDI)ld%0uB`%Iymg$=n9S~aKwNk9vn&F=mCyYaP$O6COC4ykq3?faP$So0B{Tj z$1re=1jiU~j0eXga7+cq3~-cyqYNCC;HU=2Y;d^o;Ft@J`QT^*$6|0S1IJ2mc)_t6 z9BaX`0UVpaaUD2r1jiO|+ystW!EpyTwt-_OICg;}z$3x)Q4~_%icnlm*fa3@_ zo(9LW;CKNXCp_SI1stcq@dh~F2FGb|d;pG*!SNY5z5vHp;5ZMC@4#^p9GAfHGdO+& z#}!DmK&k|(?IE=jqy|H(0;w9Lnvfa}sgaP{4N~JEH4##~L#qF|BsC3EGaxk^QhPya zZ%FL}sr?~!5Tp);)De(68dAqW>O@E_hSX9&0i>>n)Qyn39a8Uu)cYazc}RT~QeT7A zpTTJXXAn4J!I=WiRB-y|Qs)G4P6B5GIG2EP8929t^FDCi56)M>`8GJ;1?NwY2Bfuv zG$*9xLRucARYIB@(&{1YAxL`+(jJGjvygTH(k?>JWaya=Ju{)_6zDk9;}p-H^Tu 
z(w~L&S0MdWNdFBoB*?HrMhawPK}HT_OoxmL4`j@Oj7^ZS1u|M7<6+1+3>ilt<9*0D z0~u!_vjb$Rkf}jt3S?$MW)5UdfXpJuEP>25ka-PcUI&>6A@eE7d>S%8giJrjSCG{a zvTTs0LzbrivIaxeP{^u)EEi`cfmfb2eyJq@zUA-fW?H$t`#vbR9?LCAgzvY&?RPa*qj@MM1jId;g2ft)zV$%C8$ zkTVE!8X;#nWdj33;a=?@!3@0QsFDKMnGGL4H2u&w%_&$ghHY0r?vte-q>%hWzIs z{{_hZ7V>|B{9mAV3iQr`-Z{{_7_hIP$9Q1wxdS8Uzzd`Rm zpdbtiVxb@&3PwV~Bq*2y1f4hl9v!NX8+7z&O+!ADT=B@~>4LLCaDps*Vh4uit+ zP&g3^7ek>J3Ri)ra5oh0gTnn#_$Cy72!$U*pKj2nJM>9`K69W?BlKy4K6gN$UC`%V z=yL-4ya9dQf<8Y(UkmgNg1*tvHyQf&fWCvE?`Y^d7WyuPzRl3r3w^gi-#vKfyBGSt z0ewG!z8^uqcF?ag^b3Z5>Ci79`V~OG66jYA{c54#)zEJ<^xF#kPD8)Xpx+tjpAP-= zp??ANUjhBEg8u8E|Lf5Iedzxo42XsS$uOV?3|Q=e0bUre3I@Ca1Kx%K@4~=j7?=(N zGhyIL7`PS&u7`n7!ocTY;EOP*0}N7OkOqUMz@V8hs2m2}3WIjSpnG7@IT-W<47vn^ z`@rC#FnBl&UJHY-gTbB~VDKpzd>RJ74@1IXNE{4FfFYGI#0^90VaS6ppQ zA-}!!LV5{tQv;h1D;_I z!mxcX>^uy+1jBxU;X`2fSQtJYhHr)8cf#;(F#IeGzW~E8!ifGbVg!sB1tZqMi0fg* zjWFT_jCcb^yagjW!bls8)M4Z}7&#qA7Qx8dVdOn9ayK4EeheeOf{|ats5BVW3r6L` zsHHG!6^vQ~qn?3LFT<#lFj|JuAuw8j(KBFlC5*0u(R*R^BQW|PjL~6C6pZNxV`^Yb z1B{suV;+MsPs5mJU~Cr{Yj|L+9mbAD9fU}_>vO@pcFFm(Y;T>(>@Ve0cR^;MYq8cZ`_S~N_HfoZc~S{+QA z3)Ak0X$N51qcH6!m<~*D2h;oFVfrwbJ_4q1fay2F^vy8+Rha%ROn(n%bcGr5Fe4FW z)WVE;FryJ>JOMMFg&EI7kp@MPP!tVCWl%I5id;~14;1Z(qDP?U02CdBqGzD!IVd^> zMX!6H=uIek8;ahAqW7WbEEJuCqHmz+0u=oKMVFyC35o|o@i-`+0L7D`cq$anfZ`G; zu7=`TD6WIz1}JWXVh!Elf6ki9$H$d@bzx{S7-UG#Zp?Dt@?}y??p!g6J zKLN!@p!fwS{v3+Gf#UC=_#zZvf)Xo~bcB-5P!a+qDwLE$Nez@NfRcq!vI0sxP$HmY z4U}9BCD%g94N$TfJSDe5$x$dd0VQXlzzv zpmZseu7c7vP9{T@nxfSFxkW<1PHgqZ_j<|vps24>EN zne$=h0+_iEW*&l>Pr%GSpv($o9iVKO2g=4n*+eL-hq6Ubwgk$SLfKj~l=p`6eo#IT%7;MtL@1vOx+y|A1pz;Z*{0u73L*=&~nAI6( z=`hQLSs5^^H_R%8S<7J7YM6Bu%z6%HorGDZph|(NaHxuas$8h*2UP>0Y9LgNgQ^Ko zH4&;NK~*_aRYFxgRLz5`1yHpJs+L043aIj|gQ|^Cbq!Qq4^=*>+6q%K=pS}{UcQW1l7Mn z^`B5YQv#65^Ce2HVF^4J)kxfYO|nrI@C5n zZ8OxagW3&Hy9sKqgWB7m_714M4{9HP+I>*_Fw`D|+Cxx#7;2A0?MqO55^7ID?dwqc z7Sz52wLimb9cCB6>}XfNL_irh%&n zT&3VD2iGic)qrabxaz>w0Io)GEdj7}>1J}dgItZ>q;5rPhr@-YG`W(1k1lLR8Iti}V!1X4$ 
z-T~Kp;QA0;pMdLgaGeF$IdFXgt_$G$0X(kD;Q9qzzk};faJK`u4DJr#?hNh_aI4_f z!EFb3S8zvxI|khG;7$T}4{)b~yC=9a!JPx{Ja89)yDzv0fO{~whk<(}xW|BdJh=U@ z1l?1?JpJsaF^aL)zzd~i2`doj3|fqNymz2IIA?zQ0F0PaoTz7E_s zf_n?NZvywN;JyRg+rYgO+`GWN2i*6A`yp`e_kjBVxE}-e6W~4q?x(^1EVy3)_X%*n z0`61begoWZgZnhNKLGc~;QkEUUx527aGwYFci_GV?n~hQ8Qi~t`wG-qpiY9i_E6Ue z>Vlz8fjW-{btcq>LtP})b%VM%s7r*p?oj7|x-_WEfVym`>jibap{@_q^@qAaP&X9n zMnK(Ys2c}$6QOPj)J=!FVyK%5brn!o1$DJh=YqO=sGA3#x&=_T2BSOoxUFXqW{JS3|>QXxIu3hoIpYG&~CpKSIMF9%%Rz z<|Vae;Vd{K7#pQ!u)g4*clpiXf&bG35~hX zmWx=p43zikavc9ma0+zX8Ssg6f0?Y0I|Nk*sb{v+y4$IzzWfx%CFR<)4@GOsm zKzV5NYSJ7DErSor{~ zd=*x{3oGA)m6u`VpU`Z9<_Ks`faWA<9s|u&p?NwqFNNk+(7Xnk_d@d{(0mY@Peb!( z(0m3w9l)c4|K#;#d%)xW$#_5T%<>; zCd0;b*q8|$7sAG7*yx3gkHW^Iu<;mNZH51*rTdQVg2>(m{<#nWtOOED=plq2f_?36 zEv&0+Tl>1MuB%(@?z(pO>)MqjqJo%UzM=OnMGy-rq6i7S*O1TyqyR#A59jyJIdkTZ zd(X`C&&=F;&U5edX+ob2`rJaFKhoz<^vS2s$OL^x(`O@ncG9PuzGu<*BKlrJ-`DBe zkG=!wyO+LI^sS-aW%T~`kzk!GwJ_7`oBy6zVu&C z{|)rt#DGM12Asu!a~SXv1Kwc3TMYP-0V^1=iubzk-s!w|ChtAPdoS?bOT4#$_ZIQq zVg|NhU?&E4Vc=s7e1?I~F>pQuzhmGc-p}IwZoJ=v_n+YX=SaN&0`Je|{cm{xTi!p$ zpt=mI$DkV+bO(d(V$fIyO=Hju2JK}~6@zN{;Br2=mJhDugE#qL03QtGgH?R6nGd$| z;aPlm5g%T{hadA{5g(4=!xIudO68*ld~`h@-Nr|E@X@DyG=`7H@zFj$s^+6348EAb zS2MUbgWq6qe+IwD;H3;+!{Buc$z(_ehIC}eUl{T*Lmp+w7=}z`$aIDrV#qHHId%Vk zd^sOq%g5L8@dtcdz{j8R@g6>|8g5Eb`AL|6=klCI3qDuOa_h@^2^qPV)am{(a;>K>pv!f0+Eo$bXUi z{^SoJkw1j|k>rmie**cF$)864Eb`})KcD>X$lpf(9`g5*UqSw13Q{PjPeDTp8d1=g zf|e9?rl2bYy(l=7f^#T1kAh1nxPpSKDCkYW4H61&qu_Z8KBVAN3dT?{j)I94Orc;t z1q&$niGrmRlu)pef;AMZr(hEWdnnjX!9fa60jr{*hJvFE>%g#!8TJ6f-e%YshAm;( zDu%5|Pg;d>Mg zqVOXMKc+CB!cQqIqHrXIV<;R?;Uo&DQaFRcSrjJbQaGQ&FDd+n!tW^jfx@3CTuNaH zg)1psL*aS~H&M8i!W|TrQMjAJeH2zuc$mU!3Xf8FoWheN>yWHRvH{7{NH!*!L9#i? 
zmL!sGNVX@LO|lcot|WVq%p-XQ$+JoRn&br}FD7{z$={H?n&fXuUPtlAcjvzUjA({!Fr% z+(>c@$?YU}lH5gdFUbQW50R`Qd4%LIBx@-Oic%=5PtmU^N~0*9Q*{E0GAU|JQ9Ft{ zP?SSa7mB)5)RUsqDLRXyb16!kPtipbT}sgv6kSD8Z;F0L(eEj`iK0JHbUQ_NQFISQ zf2Qa!6g^1M-zj>SqQ@wDlA?c8^bAGMQ}oiQ1t@xzqBkgdo1#7x^`~ecMW^EbqQMjm zrKo_SLW;&xG@ha*6s@9Y4Mi0c9i`|OhPPmN7Q?d{-kae!G5i*WKf&nF zWCKS2ijkKy@>)h-$H>PR`3xhUV`L#C$1-v}BbPFA4I|evsv)DAGO9VFE@sr#jOxv( zQ}pSme=_P3M!n0ZL5%v4Q8O4dpHT}KwUJRf8CA~cW{hsf=qyHG$>{4DeFLMPWc2fl zev#257(J2ElNtRJqgOI|HKQvTeS*;^8I#SJ9*jAq{}^*6W3Fe+4UG9GWB$dMrx}ya zn30Sb&6uT(S;LrhjHzbKNyY|ayEFDI#-7928yR~iWAA3{vy6R(v9B_=kg;PKJD#zN z8M~aZD;ax={~3Fnv9*lrz_{*=%VpfvjJttxH!<#6#=XM0R~a{gaT6IgnQ==Qw}x@+ z7@xxUG{!e!d>6)_&iFGKe;?!j!T5(6-?j#xImGek0>|GQOM%8BA!y zgmz3gmkF0L;c_P2!G!ym@E0b$$b{FK@Fo)`F<}-H<}jgz3G11#kqNa-tk1-TOzg$P z^O$%66Ypl?15A97iT`EdDM{GGcbGVmiIbQ(g^4SexRHsQnfMEnQkay=q;5<)lSyYY z>5okME0Z2#(%VcL$fQ9`n$DzoO!|UJdznW z(=KJ&v|&suWZKtE`;lo&nBIu#EtuYt>31;wKBoVL>3x{~0n02qW<17>{>&K6jE|Y|6*Cqy z<0obuV8#(<9AjpCW_D#}cV_;MnSWsBZOnX*ng3(vYYAq4&&*}aEa9{EeAboEy7SpT z`RreO_B5Y;$!9I-^_i9 zx&LME7tH;hxj*oEDxWvu^9(+}iO=ui^FQ)=KA(@|^U-|1na|7ld^d@CIn3+Hyk5-v z8}puE-c!t*%DlPEo5#Fr=AC4IF#osAznS@eVE!QH=QDp8^Vc$e8}oPY#o2suF<)HD z7q9U}U%u$i7c2N;BVTN0K`$1ZCt<+_EO?p)FS8)Qg4rziiUr@Wpqd3I`7-$O6282K zFMrFI@ABm!zWk6cm+<8(zFfmsZTYG*Uv=fHzwy-*eDxGxP35b(d^L}+4)N75e074a zdnfq%CceIfuRrGNBEBBMH^Dc*;+sZ%b1UEciEsYQH>3DwGT%()o1-kO!@?96Uc|zy zSa=N!KVsphEKIVnoP`Hjc$jZ5<=ftTdoAA%cF#Bz|bc4{iA20e*OdA0Fd}FZkhme)xgK!Qx-BxDktQWAVK#zK_M9 zuy_QEN3nPhiz``N&5ynL@g{z}g&!yJ<1Buh!%t25sVzUX=coJm>7V>`O8@!O41Su= zPYYO5hb5=6B#k9Ev*d1;+{2QgEE&#{ku2H4l6@>Wz|TGT`CNWJkDs66=LA3hkDurB z^LPBbh^6&en$FUuEWM4T_pyxCO;6DSn#bXDNPx;+H9Yh2qyJev{(F zI~4b&cmT!kQ~V*tLn!`);$ajgDIP)bXo|;CJdxrl6i=u4Gm7U>Jdff96n{H7kdnic9H*p~#6)QHgVk;|K zvog_@mEBp{la;+#c^)e-VCCbie36wev+@;IzRJoztnA0i?^(Hmm8)2}mX+&Sxr3EE zS(VGG^I3Hvt1e;HWvqIWRfAadA*+V4YACC=vFZScRR>vB$*O8rU&iX|S$zYmZ)Wu$ zSUr^0qgg$c)e~4fiPhDtsmq#rtZBfSU$LecYg(}8kF5DSYyQcaM_KbYYo2Az^Q`%j zHA`5tlr<%+Ss`J~M%HX*Z3ouovG#PI@3Zy; 
z)~;i18Ebd3b}wu9v$lq{M_G3!>n>&8<*d7sbyu_Q2G-rgx*@DP#s94v&AM@{o4~r6 zteeHUW2~>w`i87;#QMgpZ^`=BtiO@<_ptt6*5A+i2U!0I>mOtNG}bR*{a37C$olVC zzm)aGY-qrS7HnwAhBjVWyv>Gp+3+PBmat(d8%o%) zf(;wlu$hf5*qFn{&TQ<)#vW`un~mqP@t?d7Uk9vgIAN^kK^fZ25>S#?;yTYt;e z+t_+Xf~|M6^&Yl9$kxBHHJ`0x**c!Blh`_it#jD=IonQSTPwD;VOx8)bzoa}w&k+z zQMNtLwinsE2 zSF`ly0GPJEc1*-9_nMN~aaLPwfK9=$c zluxF7HsxPY{xjvvC|^tY2Ff>6zK!xy%J)-#kn&2(YbZa)t`_XNgk9IM>tF18kzKE| zYaqJ@u`8clpR%inT_f2wCc&<$?3%%@S?rq2uKDcxj$KRHwSirm*j2`^-R#=Ot_pS? zX4i3con&_%cGqKf19o>}_jT;Pi`}oV`yFA{{`*z-sB{Eau&0nc z!`U;6J!9FkkUe|YbCkUu*_+GWbGc*hW$eA4y*IG;4))&7-aoPTKK4Gq-bdK`ID4OB z@6+skmc6gAw;y{4vv&l0N3nM@d#AB?CVOWOzT%1K(0&_x_ptW}dylcNF8fm1*N}aU z2EX(~bVyvpzN^`H3;S+m-=ElbANw9)-$U&C2m794-_z`SmVGa|ezG zV)k!he>wZB*k8i|9H`5IR1P#8eDjmhq5U}=xP$}0vK^1Cu#0ZSbNeqr)iy$bnrPsOG>CDuRj>D(X}5D=N~c$fTk*744|# zKt&D}J*haCic6`uj*8z?aXS@vQE?9yf2QIuR6I<@V^lmz#lNX|hKd9gZ&NXlM8z;F z3aJ=N#RMuQQ!$N-nN)m1#aC1;q~d!j7E`gDiY-*^qv8M+r$GNgMJ)$|gDD(L<6t@m zn{hCcgRME3!@<)zcs>WO=3s9Q-X!7RA2@hB2k+wGJsf4+p1l z@JkMsbMPPsYdLfphtfEd$)VO9YR91t9LnKPPY#{Vp|d!2E{D$N&>uMTJcr)m&}0tH zPH<=;hn8??9fvk>sFXwH9NNR7{Tw>Tp<^66!C@S(%i&ZGH{ozw4tL@3*&IHX!1uJ(Y{8Ttek4D%VoEfy&KPZliJ!l~q)pq^c2B zjj3u$RU4|>QQ$=Vpz3X^`cT!Ms=-u^plUKz^QiiQs_&^^*QtJs>UXK`NA-JDe@yjoswYxCm+E;`C%&b65!F9Z{WI0YRIj6YBh_1| z-cI#Sst-_ooSIZ>no-k&nk;HMQq!55Zq(#ba}G7%aNTNDd)%@j_l(|HAjwc^d*kI!_ht*?a$HoIQlV1KjG*aj+Sz?jHA0b zx|gF>9IfHlDFXP|dmMY8V;^#C2**C**f5UG<=8hI`<7#iIQApQHgK$pV@Ejl3&(2t z1;6y=mk;@67{AQtmxcVYonMY}JeA`OIDQ((8*@B^R-^=l>9N*3H8jhcu@Hod$a-t3=>T#kmC(=35j1!rh=*Ed$PW0l$nVdMA z6HjpBIZnL6iMKfME+_htIPo4Q2619CCuVbEE+^)5;!92xb7BQ2R&!zYXm?oh9m>E9(75)Vo^L>n-ZtD(Vfs>BYDrF-_E)DeBD@_2!Ct z--voYih4hbdc~sNI#I7&)H@*RRg2WRBDJ1KZ6H!l6RC|wYKBPdB~mXEsh5b<%SGyy zBJ~=PdaX#kMWo&;QtuF{cZ<}=Me36x^(m1^eOjbGD^gz&sc(tYcSUMnk@~Sn{Y0b= z6RAm&Izpt57O7K2>NJr$Q>4xosY^uaQjxk$q%Ie!t3>Krk-Af)mW$LqB6Yt=Jt$Hu zMg2OWej`!8wW!}#BI;+0`kh4muA+WVQU7#N|144e8d3jVQU6s@{~b~PBT;{dsQ;;` zUnJ^}6!pi5`jbWdX`=p2QGd3mzehAk6%9Iw2Hi!2T+!eh(cnDM;6l;h64Bst(ct%@ 
z!Ho&g;C9jAF45o~(csUb!9$|KKSYB^M1#jggTbP~x1zy8(cqY9a9lL3BN{dl4KqZ; zR-$2B(J)Ii>?j&`7Y%!ghNp{$XNiUviH4VohQAdJZxapg5Do7Y4eu8X6Mq#A|0Wtf zCK^5|8va`}eCCuvG<;Ju93UFLFHSooPCF?Yg=o}RG|CW-nu|s)MWZ&NQG3xSTQuq= z8l5Q`oh=&uS~R*qG`dJMxXoBCU@|>o3v6pc$o<3plxrD$9u8Xpx+E)z|z7fo&uO>P!V z{vetR5lu!WM3d2?$pq14vS>0*G?^uu%oR=MizeTQCW}Rr<)X<3(PWosvPU$j5KRt? zCe@9>jW zdqw)gBK=X3{**|6TBJWK(qB5|B+_3M>3u}{2O>Qw(ua%mF(Q4uNS`Fqr;GH@MEV?& zzCffe66wVveUnJvBGOAmdbvp7Bho8G`eBh?EfVP`MMeXW(L!Xj6dCPAMhB6RBQm;) zj9ih?OJtlUGAU?EW2nf;7a7YXB4dll*d{VcMMjy(s1O;4MAIIk>3O2*1)}N2qUoigX>ZZ= zTG8}n(ez!>w6AD7Kr|gFnhq6B^F`AYqUl!Abh~J}Q#36XO%IBuheflVqS*za*+rt+ z^`hCS{om{s(d;(S>`u|_kD}SVqS^hT*jAEDVlc@&94#7|00?fiRPb+<_ksh??m$-MDw3S^R=S+deJl7qWLePd97#>qD6{m(L%InDO$7^E!v3|9Yl*9(V~lJ(M`0-6)mn3E$$F4 z9uO_w7cEAL7Ar)HGSQ++w5S!Cr-{rok(n+sn~BU!k;rTZSBuPbB6Fk2+#)i!i_D!ObC<~6 zD_S-ZEzb}wuM#c)Dq6lOT8nJX!W{iHCVLzT(nvvS{)ayJBikLqV?&b z^%h1^|hk)b)xm%qV=Cd>-$9O2Sn?KMC*Tu){ltRkBinXi`I!#{Z-NW z4bl2-(YlXl-CwjGC|b`Ft=Ee-M4KGZ=J%q_qoU0q(dHA;W`t-nRkWEQ+RPVi7Kt`X zM4L6D%{tL$yJ)jhv^gN!))j4kCE8|*wmn4K-;1_)i?;WOMBB$j+owd^r$yUmMcWrd z+m}V#S47*_MB6t-+jm6Ue9`t((Y8pm9Vyz55pBndwv$BLsiN%+(RP+-TO!(S6>Y0T zyN04&E79&u(e7%|?tankKcZb<(e6XhZiqy*`&hIq67437cAtrM^F+IEMY~0!-4CK& ziDv-$t}= zFWO%q+TSGFKOx$`B-;NsA=_Y>{k6YU3y_W7dyr=oq4Xg^Z4A0yh&745f) z_6J2)1CiB2WVIAoZA4akk(Dj7I*F`bi>xa}*3}~G8j;moWZf&W9u`@TimWF@*1ts7 ze?(T|Ig#~-$a+g;y)Cld5m_IJtRW(6w8$DKvL=eGDI#mS$ofoV%@JAiMAnZY>t~Tw zEV5RJtkoiGoygiKvbKn<1ENE7(V?s8aH;6*(P6Ua@V)48>ZI(jN_1E&I&2Uf zHj57124DMTbV#%o*{%jvuE_pIWPc~Je-PO}iR`5!dyUB6CbAER?1Lh^T4Wy;*~dl45FJxQ$NHjUW6`m# z=-6I#%oZIxiH==G#~z~Nd7|S5qT_|4<3*z5Z$-!7iHV39LI^n20iUeW1((dnj|3h?sM09>!bpE&K{EF!Os_6WV=-gLy9w0iuFFJoL zI_Hbd!$jw#=)6R9E)|_mh%Tw3O9Rp6G|{E8=+ahnX)n6ui7sb|E@w+bmtTu67l!h=-OL!y<2qskLdcG==!4Q`d`uYf1>MaqU+3r==z1|`laalz394FbX_94 zE)!kXimp3E*D}#{x9ECUbZaWQwG`dX6WuNs-L4eft`XgC6y0tS-EI@z{w%sZBDy^$ zx;-hn{abWE9q z{*>tcwCMh<=>Cf6{+j6irs)2T=-yX!|4?)<65S_=?kh$2ZKC@Q(Y;J`-zB;q65T6B z_ZrdtnCOuqdSr<~T5M33E~$6k?3 
z{trdCeMczh{cM9Bgk+)am?H73mMP8-IJ0W^?6TR|8uRBDq`$VrdM6Y*6uPLI} f*SbZo??taA|MxRnN}c~3TNeM{`2Sn Date: Wed, 21 Nov 2012 11:12:40 -0800 Subject: [PATCH 014/136] Spaceserver agent list, broadcast routines for discovered agents --- agent.cpp | 80 ++++++++++++++++++ agent.h | 23 +++++ interface.xcodeproj/project.pbxproj | 6 ++ .../UserInterfaceState.xcuserstate | Bin 101676 -> 103122 bytes main.cpp | 11 +-- network.cpp | 9 +- network.h | 7 ++ 7 files changed, 123 insertions(+), 13 deletions(-) create mode 100644 agent.cpp create mode 100644 agent.h diff --git a/agent.cpp b/agent.cpp new file mode 100644 index 0000000000..f0f3fe9bc9 --- /dev/null +++ b/agent.cpp @@ -0,0 +1,80 @@ +// +// agent.cpp +// interface +// +// Created by Philip Rosedale on 11/20/12. +// Copyright (c) 2012 __MyCompanyName__. All rights reserved. +// + +#include +#include "agent.h" + +// Structure to hold references to other agents that are nearby + +const int MAX_AGENTS = 100; +struct AgentList { + in_addr sin_addr; + glm::vec3 position; +} agents[MAX_AGENTS]; +int num_agents = 0; + +// Process an incoming packet that lists the other agents in the area +void update_agents(char * data, int length) { + std::string packet(data, length); + //std::string packet("127.0.0.1,"); + //std::cout << " Update Agents, string: " << packet << "\n"; + size_t spot; + size_t start_spot = 0; + spot = packet.find_first_of (",", 0); + while (spot != std::string::npos) { + std::string IPstring = packet.substr(start_spot, spot-start_spot); + //std::cout << "Found " << num_agents << + //" with IP " << IPstring << " from " << start_spot << " to " << spot << "\n"; + // Add the IP address to the agent table + add_agent(&IPstring); + start_spot = spot + 1; + if (start_spot < packet.length()) + spot = packet.find_first_of (",", start_spot); + else spot = std::string::npos; + } +} + +int add_agent(std::string * IP) { + in_addr_t addr = inet_addr(IP->c_str()); + //std::cout << "Checking for " << IP->c_str() << " "; + for (int i = 
0; i < num_agents; i++) { + if (agents[i].sin_addr.s_addr == addr) { + //std::cout << "Found!\n"; + return 0; + } + } + if (num_agents < MAX_AGENTS) { + agents[num_agents].sin_addr.s_addr = addr; + std::cout << "Added Agent # " << num_agents << " with IP " << + inet_ntoa(agents[num_agents].sin_addr) << "\n"; + num_agents++; + return 1; + } else { + std::cout << "Max agents reached!\n"; + return 0; + } +} + +// Broadcast data to all the other agents you are aware of, returns 1 for success +int broadcast(int handle, char * data, int length) { + sockaddr_in dest_address; + dest_address.sin_family = AF_INET; + dest_address.sin_port = htons( (unsigned short) UDP_PORT ); + + int sent_bytes; + for (int i = 0; i < num_agents; i++) { + dest_address.sin_addr.s_addr = agents[i].sin_addr.s_addr; + sent_bytes = sendto( handle, (const char*)data, length, + 0, (sockaddr*)&dest_address, sizeof(sockaddr_in) ); + if (sent_bytes != length) { + std::cout << "Broadcast packet fail!\n"; + return 0; + } + } + return 1; +} diff --git a/agent.h b/agent.h new file mode 100644 index 0000000000..896b76923f --- /dev/null +++ b/agent.h @@ -0,0 +1,23 @@ +// +// agent.h +// interface +// +// Created by Philip Rosedale on 11/20/12. +// Copyright (c) 2012 __MyCompanyName__. All rights reserved. 
+// + +#ifndef interface_agent_h +#define interface_agent_h + +#include "glm/glm.hpp" +#include +#include +#include +#include +#include +#include "network.h" + +void update_agents(char * data, int length); +int add_agent(std::string * IP); + +#endif diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 910b2dbd70..18a6cffacf 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -16,6 +16,7 @@ B6BDADE415F44AC7002A07DF /* AudioUnit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = B6BDADDC15F444D3002A07DF /* AudioUnit.framework */; }; B6BDAE4415F6BE53002A07DF /* particle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = B6BDAE4315F6BE53002A07DF /* particle.cpp */; }; D409B98A165849180099B0B3 /* cloud.cpp in Sources */ = {isa = PBXBuildFile; fileRef = D409B989165849180099B0B3 /* cloud.cpp */; }; + D409B9A8165CA7BC0099B0B3 /* agent.cpp in Sources */ = {isa = PBXBuildFile; fileRef = D409B9A7165CA7BB0099B0B3 /* agent.cpp */; }; D40BDFD513404BA300B0BE1F /* GLUT.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D40BDFD413404BA300B0BE1F /* GLUT.framework */; }; D40BDFD713404BB300B0BE1F /* OpenGL.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D40BDFD613404BB300B0BE1F /* OpenGL.framework */; }; D40FD5FB164AF1C200878184 /* int-texture256-v2.png in CopyFiles */ = {isa = PBXBuildFile; fileRef = D40FD5FA164AF1A700878184 /* int-texture256-v2.png */; }; @@ -69,6 +70,8 @@ C6859E8B029090EE04C91782 /* test_c_plus.1 */ = {isa = PBXFileReference; lastKnownFileType = text.man; path = test_c_plus.1; sourceTree = ""; }; D409B988165849030099B0B3 /* cloud.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cloud.h; sourceTree = ""; }; D409B989165849180099B0B3 /* cloud.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cloud.cpp; sourceTree = ""; }; + D409B9A6165CA7A50099B0B3 /* agent.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = agent.h; sourceTree = ""; }; + D409B9A7165CA7BB0099B0B3 /* agent.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = agent.cpp; sourceTree = ""; }; D40BDFD413404BA300B0BE1F /* GLUT.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = GLUT.framework; path = /System/Library/Frameworks/GLUT.framework; sourceTree = ""; }; D40BDFD613404BB300B0BE1F /* OpenGL.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = OpenGL.framework; path = /System/Library/Frameworks/OpenGL.framework; sourceTree = ""; }; D40FD5FA164AF1A700878184 /* int-texture256-v2.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "int-texture256-v2.png"; sourceTree = SOURCE_ROOT; }; @@ -137,6 +140,8 @@ isa = PBXGroup; children = ( 08FB7796FE84155DC02AAC07 /* main.cpp */, + D409B9A6165CA7A50099B0B3 /* agent.h */, + D409B9A7165CA7BB0099B0B3 /* agent.cpp */, D409B988165849030099B0B3 /* cloud.h */, D409B989165849180099B0B3 /* cloud.cpp */, D4EE3BC015E746E900EE4C89 /* world.h */, @@ -267,6 +272,7 @@ F68135561648617D003040E3 /* texture.cpp in Sources */, F681358B1648896D003040E3 /* lodepng.cpp in Sources */, D409B98A165849180099B0B3 /* cloud.cpp in Sources */, + D409B9A8165CA7BC0099B0B3 /* agent.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index b310c1ec8998fae6553ee58651446f823ff07bac..13077bb321daeb9cbaa5b117160f46a4a44ffeb5 100644 GIT binary patch delta 39108 zcmcGUcYIUT|Noy!?+ws4$<3XC($dn>Leq3ZTPRTW-kVaOG6V(6bg+^mZdAal)P+(& zz=?{u6>uXCh67ORsgFM4LBRyk1HJ)0}WY09ubk9uB zEYDoeJkQ0ROFWA`OFY7Jx#w!na?c9Sjh@w>TRm$$Ydsr0n>_b<9`roo+2(oD^NeSQ 
z=LOFzp4U8YdiHwW@f`4c=sE29#Phl5>yYP|=X=jjo?kt`dro_uUYECt*X>ojy4Uii zc+mBd4Kl);ytM(D3T&8E=5%|MOV_5 z45g)#uM{YSN~O|K>7;a4s+2BDS7o3wNExh*Q)-m)$^>PiQmdS&Oj7EU*~%Pct};)V zuPjh5QZ7@LDVHl(gp@0ltCXviYm}Rn)k?i`i?TtvTiK{=Qno6OD%+IDl;@Qfloyqk zl$VuPlvkBq$~(%t%6rNI<$dL#@`3W9@}=^X^0o4fa#HzKIi>vObNL$i8vB~~44>(< ze12b+uaz&`m*Xq(mHOKHy7;>Ky7{{M2K$EihWdv2Lc@I{d?S75`6l@$`=U-3;!?)A-yzd3y zTfRNMy}o_EL%zelBfg`)W4`Zv$9*S!fB63N{pCBYCaKA)TlJ{T)D*S3nyR)@bJc*F zrQV*-2s9&mIsb6a@Em2F- zyqcovnxQq-nrUfT3oTR2(n77ZHd;{2*NU|gtxPM|Dz%PU7p<$-TkE6s*M@4tv{Bjw zZL&5^o2kvyF4mT4VeLxoT5Y9fYqx4^wDsCX?SAbc?J@01ZM*h@wo7|c+o!#&eW)GL zKG(k0e$al@PHCrgbV*Ot-MXghx}m4)X?hDiUC#*V0XiK$Gy|Z4WchS4*-SqBy z4}Fk6SRbMf)yL^I`gpxopQF#!=jrqH1^Pw$LVbz8R1fLP^qcit^*i*ndV_wqzEQtd zzfa$yKcqjZZ_}UEpVPPNJM^9UF8wuqxBj~RhW?KJuKu2WKtG}%)j!t1*1yq1-|ENo zpY>n#llo}`!(n&~)i8{vMsp+G$TV_{TqEBoHrg5$Mx`;>7-9@Hh8e?+5ynVklrh>E zW7HUPjJd`Xv$zwvDQ0uCg_&t)n{CWIv%st{+nXJ?aoNq2LFETGSFEKAQ7n+OA#pY$^GBb3Id9At9TxHgqx0tt@cbRva51WseTg|7< zXUyl#m(4xqUUQ$h-+bTv$UJ0zYJP40VE$-y$ z5^Je-nRTUgt+m>!w{Ed+wQjTSwC=JVupYFwSPxkbTaQ>zSWjBptsU0O)^2OR^|tkn z^^tYRI&6JnePw-T{bHT8e)Z#b_?`YlzwU45Pxoi~v;A%Sx&C~AiNDO>VMNxf1v6&s zrwNH9k&N5!=`M6UXkYBU)f^(^;X)oGWcxB?X;mY-iS0!GZc#UwrDV%Q!=brV6%M-6lyk0o>3gP2!!2cM0b^!uhUnz9*aqg!6sjJh(BjA#okqkspUH zoF9mUHTLgoK6Pygci5ur-1^O(iyf|{SxI$Cv$tNbw#u0-67Klhc+x!k&KUt`(na== zrWwwpOYJQ+na-re_QAXYXHv*c3g+0G);G2L1wGEBE9{vwEqlj$uj6Wa&-!A=a{IfP zxz3~;?4!XPXVNOWD!;^;bd!Dkh5?Rx`-2Vb9Jkqj&g$e$T4Q&t%X22JwZnH8JJ#EG z)&-qO8|^ph%AHB~*v&TXY_Lj}Xj`!_1BG8BSj;4jqq{H^Wk}PM^$9C;KtsS4)CrX;zPnIM( zlfJa?oY&f!^o@O@B*&Tbo&Ej1TxZhvcIy00XVOph1*JvKq?7iV(ttDRH~Y2HHqN9! 
z?KkF^IFqq++qQ5fC)oYkmOGPOcFlr-Gr5U<*L``8WczU2md<3aed4|nhiZE-%5Wwd z_Skmm&Sby6xLu|*ImK>hSL94ivv=N~>BzA6T$Jlf&a!MtQC?xxyY$UgS)!w8Q0j&g9PagXL|U$zAP-FDY{- z_pnbtnD6Lqcd2OOOzvj~$7ef}2ijsw2geY5Pep+))R|mo-?gZX zGkLCk_~BB=0{ir$B4_d?_SnjNXYwNZ+R6fF@>2Wh%77!;F6wC5e^iz_lP|Y-EY|I` zC8f^ftL()cOYB`+O?%A}%b9$={cuO!KD^cMSZOD9%5o;#c2TEHXYy)0e`%pJ`Br=4 z(p+cq9d^T`#kONxx_xM=+nK!1-oCVjog8vIlkc_*w*?%V?YhoEXYzgaiWM#F!Cee{ z`?h9|2km2>hdYxWwhJHY=Xlg!Q8mJu{J7nvUo7Kvrr}I}$}Z|M%9;GEy`rkrey&S~ zGkJ%7G=jWfAM0Fbhr3Jmtgh+K>dWJngA8#{qkA_g>EA59~RY_i-j4viCmK%W>4MYSzn{ z{HZ1jC-yV!8=kcsf7q+8>h4TFZ5MTkf$7g>+BZE{ zVqeoc&FPkHarGdlyYb&qkA3>;T&Fw9uG-$y;jz=N4BF}a8rg65@i^T+`%s@sr(3sI zUDd`ud`%;}e_!3{w(KK2g7%8OE$l-(T=qM=QtXNSJoZPCP^x`o#{m11o%vCX0m}`i zJJascFK9p6uesBmZLjPaG32p7T&_CZZR``vhdSMP(Uv;YKVaW*T_dNvz~0_}s?%L; zpT2Ie{rC$3d&jFu_5%Y_o$hjb$n|aP!2_Due+_8iba#lV9T~3MonLf2I@uEkj&-`b z*r%^6w!a&gW)FPHWvhb*Io-YN!7mMS^tD&47~pgdun(;m?Q{>eCl2oGbPv0+zSurA zxS7*E()NvLV=J!|ImSdwQC_j^nX7zG_c(j!@M3$yDv!NmmB;CxVDDKq!RbEFF1m4$ z(>=u=GP;euF0EBvOR;^*el-(IPSII8Z*M_ ze!%|g*1k^nL-vF{Jsn%^Rkvj}i@a)**y(;O+@Zn!xJbD3?{|;;DZ6TJTIShTo%>n$ zbK%OhYvaI9_mAPqElQD{yLWiV{jK|$`#bk>_X+p+?jJ-#gGg8}67CiWn?%ArBH=!f z@PJ6zA`%`J30pAPP-$lZoBH^@<91*AHzWyOkGf#@AxhK_==4s(c_hfildNMs(LP`))vXFEk z`GwS6NLfN^Eu=gl6$+_TNM%B5FQkq_>Mo>SLK-BbQ9_y^q)9?LUq}}UX`YZ42`MC` zWkR}2NXvz^LP$3XX|<5n2&q9xcMItrAwAGvNDm9?Ng+Keq@6-~Nl3ed^oEf33h5mo zy)UE>g>*znp9<-VNK;Rwmh(J0Ob#~_3G2fA-8-H@xakC?-0t#Go?W)z)be8q8{!GY z_Qd@|{cU6T#yDOPAr7`5p4#3%a;S;ze%sP2V@aFiNmcb-R7a<#vBJB>!uQ6*z3dy_ z?ydEW;rGY!fe~6!eTwQ(bQXHklereboNz-D3NacUv~TFa|sp1>SzQW#ic~;0gP$cZWA# z5PNva{_EW$d-;2rjTgp%XQJs}zSmL<#enDHWiPW+4-C<+jNv=t__ek;Fig8XhCd(2 zSJ~d9nRZz9*{9Q8^&yYLr+aRSWxW{By2W-JEUKU3i$rgaMPH6b@3JTE?`%JGxN+J) z>bfqL_G&zBqwP4@&3{h}e=Uwb5b55Y_V*JDZEu>#zU`o@JrqlNJ)ZQaz5U=I?eQ4? 
zW*mRo&iJ5<_FN3#6UU#oQx9~uAN@epUW&!`#bdi7wXqXFRJGS*vA5%~J!kuF)`vcA ze=PoPJpP_t^-huf!iPTXU@UeZ9y=6``9Jb$M`N*r@z`f}#s|grtdG3fm$BG~@z}Ta z%8v$X$7A@RIR2yUI9P1AI^@%SiN%h@W4}epYEygulv(rh|IzAy#u7h1o9J~!TDM(v z)-61w>0T+8@o7AxvE6WJO30fO3w$0Acq7znr%suA!RSeKlc(g@&YoS+t!~!*DYNF!E9gGuqA4@#W>1-0FnC7o{Hb+wXU;1a zP&;YRu+jOm{|5e%Gpu{RNVrQRUR0Q0mOpRug#`=d%`KQdYtoDblcyBa&YfLbFn!jP zf7a8h#>{>hH)}>Gvfb~nrZtTPzm5k}?Ky{sYw0ok+c=&TnZ`YI@865nckxKTE;=&y z%o=qfju+c^9vQFwy+-{I$17~_(f((arJv$>Rdh_;b=2qodrkT!9_tlp-vQBeZT1nR z)uh?8&n^|-zgMbXuS#?wi{VV(Lh3mI?_J~OK9smFOZbGz!|L(kV_T8jF zprACrIKQaiyy^4j6`WT)Z~CMO^XAW;KI{B{j9TyC>+2tJ`{QE0dDl^w{o2RwGi&T$ z@q|gygzVS^_*YxR7FmgS?gjStgR%9v%_oXBGdc<+XFNW~&iJ6nUh;`jFyDJ=xN?Jc zf%hWs#okMVBne3tl1oU9Hh33$7ZLO>5mI9zHHm(%PqN)#Epm<9Jm~VRLA&Fp83k8+ zuaD5L@m}j)?!8V(ZXtPuesp!XdiwGvWxl$R4d ztvG!Ag8^d}m6YbCmsb=m8NH~aq%beNaLL(ynD+zkk#mY3^?ppy`>BxH2q{;{kpq-~ zo%=<0!Pnm7=d8gA@Am|~KME-*r2MFp0z33YZow(>H+! zUR0QuUQ${bZyhB|X>(4gTqO{b>L8@bxKuCCWrJ3X{vTS!N?EvagHocDDs7c^Lh2-> z&O)jZQkM-%xl$3+>MEpef7iwR)lCK6l|JVzzpv6S#_1uXo_}-peAT>Qs4^;2&M;-T zGC~bF4|t&Ab4R12xUkOo9K1MTZSU1YERI;UWYazTVQRXJaorc4*o zU?B|=(oi7{+n`*i%pj=D64G!XjfnC_MtSFdlU;C$vLwR0R9UDjQWgtow2;OKX{?Z{ zHz-S$5J5!;2u7HRBc) zRTNjW%S$gWtynU4QE_2uVbQLFEx4O%u}eDCYvZ;#h9M6UwuZsz0ebr97=XBcvHZnkl4NLaN)KJg00Y zsO%KdY$45wa^^;>?*6V#!ER+wg!j7ghVrKJmXPKPX@QU~64J#Rl)cJ6g38-Mx67*UQ)2*T>gaNcBRxMM$>_>9!5N z{=NaRa&8yW9ejc~0}c`c4t_{Vt@}gtR-#dELJJ=d422p}NjRQX8p_395-gdQ(VmMM-O{3RR{s&73F-4Q_22fz`9Eyi`%84x78keuN7bjPGtcR9mRd(pog<_# zh4fWa>T7%H$yWU@Ru_jWH>j7Wm#PcZMMC;kNZ$$RgphvNpe|9Dsv$Kjq@RTJn~)K* z>+Gj0by;lhf9&kuU%kpce)1OowK4p79KRv*`AOK{PfW9){dK0cDwgnlJmF@$axzcD4KQ5%-8`LL-^hZRz^|KN2=Y;fUTzrRmpZYx6LOLDQ{wrJ)ui+&7)bE<_ zRrR%q)Gi@A8r0oFc1EOHy%mw#Bjkj*)IRk-^=-0+EJvhdDO_WBKGo4)|A*!~sD2pH z_&~^w8q|-3+&HRnG@|jbkekFcK2`5iKaXf6MKuz`HTL%>2ip_>92ELiJr%CJUp=ON zryf^NsNbtUs6VPdsXwc~s3+B5h3poxSI9mgYeF`JYzeukkW+-5D&!VI&Jc3u{pxS( z@9H1wpXy)gX${SxIkf~$(qtjG5^`%H7YMmT$YnzAAmq+M?jhvfLhd8venRfAo*gh+ 
z<7ofNo}2eNG<(=z!6uqJhA2_wjtCMvi;&eQ@>vA2VmW#g$v+)zqBV~pW)!(Nf@H)H ze-zmfL9$~=vj}o?6GyO#7KkCuqsY(*QW!(hqR8zLBz6=br{BESQPo6iAB(q)#Os5O zNJ8frlof?8ayWvj)?Movu58eH2syh!>m}qIJN1vubgf@_Lj0^;8>kJABn=XBn+9!& zkaHtREwmBg2@TpvAqU>!Yi+bPCNgrF?7dtYr`2fg`u0voOFd7>`9fCq(?px7)oSNC z25FP(XFDBjii3(PI*zp|+SKZS!}`pcJmr$XwezQ~)6S>vpFk{Yx^_YRZl@z9;X-Xj z{Z~#$hftk1C+=#lkPBm%0iSCNBEBvXa?yW%E!1i>HB4h67e{SHdiSiYkdT8)qq8$r zyG*-0-ZEDRx%7jUPO4D}nl%KVeaWDU7|E~QR3H>4D0S($; zLLL~IU88kpctV4oAml;+F{wAwYP8N#kAtJlnDvit(vu_Ilz6sW-K+b;l?}Qgn5P3whjny|vyZ@j4;b2>Bx6+*d!gkz-bGy)fKigI=T;>m_=rkjD#oijb!Zd1l;4 zncg9yR<2j*?S(u+$Pn8QMO5OzndCbu(+{*OtW#9IlW4Um|1lv3j+TFA(yDLY|S?`9w@)66vj+ zq({bGpB>jSLRBR_=eX_uDvw9c7A$MQTw9e^ezLGuVaIEr!K-B8t{e_;#Ivuzdl^K zNxxFRO21nDRlinWu3snQONG2h$V-IWH&e(VAqycd+oa#1uh3WOtMnUnTfa%jR|xr9 zA+Hd!E#%cgzD>w$BD&G5Grd0cx>$I2N2A|(usCw{qbA-N3oQ;~-}(NaBz=ACd1)AX z<=Z`y^v$v7a2VTnG~=I_N^0T*vDjr{>=p+Gdh|zP&zFappmd8Kf#0>?UEj&;aHTx1 zKNT)tr$3=TDda1KeAPPrY5f@?UoGTo>L+*|ZBky)UydZasJ|rS`BeWR zlJHqXp+2hcrT&$WZxQmXF@@v$5C8G-qyCeSZx`|%^|wS6PU(OAC*e>1FCpJ4D(i-ygjh{!An^ zob4$?i8W#ThSku|btrE9y(5WVwi+qX+}OpZkrtDR-?cs4t5_s{(Q34b+-P+C z@6A(U>?+c^J51c+-@BFj&WHj>qH_Bo~JrK|CaOT=ExjOPyJJaZ7^a@vQ zHaZ(sMi-;2(aq>?^e}n~d5e%A67s`BeniMyh5V?Hw+Z>N%><1;Mqh$>jhtO)tZ#}BHGa>q2IIy|8vBKFwXNoGtirbWt|Xw(|#8Iz33#uQ_! 
zalVkB67thRen!a83i&x9Zx`|oA@AHwHd)4n#tdVoF^eo?cDN=|!1F?WLCCv=D?h9V zS4I6o-LX3~=3h4#vDz&*mWC@E5=RT?_68#)o_^2?c33)7>kTs(WFVV}(ook_nrn*M5LRo%bS zP2*N$U8L@}8MhmE7;B6>jk}DsMuU)F6Y_2$zb@oAg#4zE-xBg3A@AL6tT#3ocN-gx zO~z*99^+mi?-TNeLjGFF-w62+;c^I9vT)^0P*O)vs@v4nbU6BMGoB4sZZaM-9ygva zo;02^o;IEl@_r$|E#!BE{H~DS6Y>EezrTqrg2oQA*loN(Fswv-@?cmAw-a*YLw5aj zmgD%;H^Uv)o@uka;STZ5tns$-VWgyYjCYOqj048|#zEr)A%7&~Lqa|*9~+;9qvyww7X4Jnp9%R3A%FS!QNJVEG4vQs90QH-#tj@cw073{ zQ@YhHm^EL>pGOyWpM4dm`%v3W?$S2oEoto)p#<;Uvv0+o@uQ$`pmeh?HIBeLI`O)S# zQ_WVf{OA{T%+_c6#Y{B=X9H%cnICyLTRF4HY#XjzZx)*+W~q?>6!Kp}KE2*_w~KU& za1rf|`iGi3ste6dX18$V2D7tSWp**U3YSy35`;??K^TNS$0wge$SW zL#ktXkvY&D94>D#2MJfB26KpTHJ+e&`*iOyvUbLTDf7C_8$Nf+l(?M{=E(Y6GaQ*( zvN_fq7w6RoS5p16>5kNXGs2ad%qiwn^L%rfIo-U#yimB@!sQVzuW%{CeIh!CsbDnVNQD25|rA2;;cx+WaFU|43qr84V3x_|n)C@<83kjFmU<%=~{$Dn) zFs}+%Hkelmm%qWhTDY2ynLca&l(|!DC;g)?*O@m&lCBr7W)0>F;Yt}ftZu>Fe`eU` z&5?|ogsXX|!CWm|sbgkMnSXKJ+za!k4L;K-$>#0mnz*$)g{#GxkZCrU>*Gc?2v>SM zw8^|b(p%S?tId1O`-H2RaHR@YhH$mqV6Kk-JQPJzqyL98M-QG`H+jLN`SZpMoiZ40$M#dSY0 zKa32URzr@MAD>;%%}<1@;P17`{5;%Yz4?XtrTLX`6$)30aGm|okZgWy9zVNGM^|a{ zg!#R26$w{yy|<+!JLMPi)PE9wGk+JZQsHV_Ke(kMy}5;z@b3&uvSi^Z6Rz_5>smTm zjj$3eEnKzx{3D*eM`NDOna4i(B%jz3tIyS0Sw$(aZ*R|<~ zf(V{3Ts<1BLgDH;d_e8=S!bFp*($Zl!{zI(wpKfy{z6=AFHp`&+0E+{e^3Qa19i$LBcgy zxP}PV&`s7rYmhbA8e$ExhPh7&*D&E4E?g6ZYjpJQWRdV^{i!U+eX2FynqW<&iB)Tz zXHBvuTT|*EYvmY`SR-5`g=>UxO{h1r9hFrVgeR=GF0^J?GlgrEaE%qN>U}h^>a5w; zkgE|NE48UlsHFbibFNz#i7cR+5UsP6JnwMT$Sh^&B4s118 zo5GcMTkEX#)&}ctYol;Y6RzpPb%AhQC|on{HrrYESob<~>we)vxMm7hU3k23&6%Kh z``2Dtw_twHx=9P>4XB$uWz5LhdUqQ~$D~KC$08$cn{dslAJN8UJ|akf6q|s)o_P()-LNc;hHC07o8bq$<`azp8w3Oz1BYAnlD@n z>VI$J$Vhq5I{2T253CP`>tf-$q`oZI(IRxz`uJa~j`f-KMcl)e!nNp(hi@VtzO{}C z*J9xc#XX#`e*8}nKUqHu*An4cIwtb!o;-bi-Q3Kog=_!4|FeFx{tTC!|J*KGe_5x6 zD=b{1{+nD!bH$(FmuTXb{Vpc^8~dBoTLDM^^ktC&c7t#&f7hSnPxib09-8>Qe#J4+ z-^4MPb$yt3;`7FD_|3??AzQev6|O5{vnE{g_vr994^LR{PxYty zTL{;c!gZB!U43TA4~b2ezaRfIA^feQ(QE!OA%^^~8SyW}Om+;6{PU~+;xLgqT`yeM 
zow3q3JfXqw60T*@S>!MGSJdy#bL1wh^S7^mE6-6R`78Y$>kER8?1XjxPW8c{ql4t{ z;_q5NBk0Hp)q?wcaL)(#B5*GQ_YL4)3GSQ0T@UVC!F?yV*MfUJxSs;|QE-0;?i1i? z1fC|~Ndk`>JYMklz@vf30FMQprr=2dPbzpy!P5>rR@ZJF4mEgS*yf=Y& zHF)m=Zv%KYfOiuiy!V3lQ&8NXv;k!pC=)?B50rVJECA(VP%Z^!5hzPQ34tO&Sq92Y zpsWVv7Eo>jkYmk z;F|=#3&1xIe2c+%1^AYOZzcFvgKs_f?grl`@ZAHxN5JLR=u07_F9-d4&~E|#Hqh??{Z7!= zg1!#)2SDEf`oo}a1$`UnkAwaN7%CWDz?cojS}>jgV;2~^!FUslJz(qy;~g-*0^M{sG`01pXo59|r!3;6D%ilfgd~{L{dH0r+Qt|3>gX z2>y4#|2s5Ifu>c^v=21x2TdnJ)AOL|WN11SnofhJ7eJ`#3}`wFn$Cu%bD`;cXu1TN zhM;LZG+hf#*Fn=~py^A{^c86O2{io{ntlh(3}}`H&C;P+M`+d)n)QZelc3oQXf_L) z-4D&SL9@r9*{_h|fRqGC=}L%{evmQ%Ql>%597vf5DHlP?C6FQ@Wf`Qbft0%-r2$gz zg_JFj@&u$DfRw|KavGW&(7Yuy&w}Qypm{blFM;N5p?N20UIop&Li6s>yeBjt1kHzp zp!qOpJ_4GLg63C2^GBfhQAqVdY8yyx2dU+d+5u8KLTVMHc7@cDkU9ZUYaw+iq)vm> z3n2AkNWBzNmqKb7QZIwlt046nNL>!8w?gXukh%?0L(fC%i;%htQg=h@8<4sWQs0Ku zcOmsKq<#shUqk9KNIec|K$;WMBuH~XT4PA7hP26$HV4w?LE0sdwh+=5Lz;lJWsr6S zq+JhbH$&PMNP7lCY0p90c1YU+X>UNbSDh4glWNbdmY9U;9l zr1yvPfsj57(nmo0C`ca%>Ej`NBBalPbOGttLV7)<-wNq>Li$=rUkB-%ApIUlzYo$M zh4ej;{ywCC0O^My{RpIg59vQb`p=Lang8jhAfpRp^o5L3kTC`_CPKz}kTDrDrb5O{ z$f$#iIgl|AG8RI{U6AoKWbA~D*C69{$ao7f_Cm%X$T$KS#~|Z4WPA@9KSIXOkZ}?+ zPC-i-vh0F@b>v_m} z5wc!}tXCoHHOP7$vfhKN_X&~p0c3pyS%)F(V`!y9tB%lW2DG{zTJ45bUqGv$pw%zX z>J+s49a{Yb*+8}q*{P7-0xAIKgB*%yQ$ zdmdzmA^S4Oz6!Fhf$Zgwy%Mr-gzTFjdkthi3E9s>_KT4H3S{qs?1PZ~A!Hwd>?4r< zG2|3MPJ77d1vz~nXE5Xpg`DA#GZJ!YAZG&P)Iv^Z669P6IV&OO7RY%Na-M~p?U3_4 z;Mo`BpR zA@>)^Jq5XcK<;S>1)LC&Ax0u}_CK_C?Z=@7_-KsE&0Kp+nS1rR8P zKwAiuL!bi$IzgZd1iC|@7X)Y)0P^x7uL$x=A+HSb+CyGP$g6_9Zjjd#^7=qtf5;mIc|##@1mul|ylTiB4|%na zHyQHIhrA0QZzkl;hP-)@cM;@W3VDkmlox`$%OLLx$h#WyRzaQ(dD|fGImp`qc?Tfx zDCB(td4EFC1;NG;%!gnZ1S=po3W5_Mcpe0oLhvdGUIW3qA^0E!AA;Zk2p)ytCy*~e zzMByFUdRtZemlr7hx|#9KLhe-LH;d}-vIgRA^&s8KMwidLqRGOWJ5t~D5!ygsZcNt z3YJ5`O;E5J3Z8?4SD;`Q6dZ$sU!dSuD9nMvLMSYT!Z9HzJP!&dL*Y^=yb21hfx^vD z_%IZ1g~EMM_yH7t1ckptkpx99C@O-Y_E1y_MH8WDIuu<9MK?fEJrvyvMbAUgZYX*K zioS-TAED@HC|0331&TwdP}~WMdqHs@D4qz#)1ml6D834cS3>cPQ2Z1WzW~KALGdvt 
z{soGEg%TY~QlX>;lyruY-cZsPO6EYxLMT}bCG}9U7E0DZ$qp#l1tq(oGe>$8cJ`0(j8E`3rcrG=`kq%1xkN~wti@v z0c|s(?Eq*y655W2wzHt^#nARrXuBEOJ`8QQ5~A%p(Do3tJp%2V&@KtuxuIP)v@3vi zMbK_6w3`I&ra-$T(C$iTcQv%T3)*dlcK1TN7opu7(C#g0cM{5gGAER!Kv@=)WkXpn zC>soAL!s;D7z2J9)Pk}pllD6?Sr!Kpe!PG3d&QVJR8beLwR>79{}Zp zpnNuzUkc@mpnN@)-w)*vLit`OKM3U?Lirz1AwxwYsAvlnouHx$DngT?VkT77LB$HF zxCJV1gNnzYVh2<_4;4Q`MMU5)XrBe`gV4SJ+E0Y`)1m!^(0&!PzYW^o0qwU#`&Xg; zYta5jX#WSa{|h>_h7Lv0p#(Y%gbt%1)L{&C2t$W!pu=+L@F;Y67CLN)4qrouAECp~ zP-#GA8dRo3WnZWq29+bAatTyk36)nvW zT@Ib@gHDe^r^levd(i0!bov-NCqicpIvdcr96EP_&fTE%ROnm>o#%w0^NrB?cIdnY zI`4$euR-V6A@U`bs-{qt0##F?st&5=K-D8q^)ysH3tgH(7Ztkb&}BGusevvNpvwc$ z%c0v% z&}}tz+Y8+eLbng0dpdM)1Kk7AeI9gQ4BeMP_h+E{OVIrl=+OvzD9}TN9)qCAXy`E( zdfWm%8lcB|2=(|3dVB{xPC(Dr(6b17mO#%7q33+)c@gy70zIFAo=-u~-=J3l^pc@h zXXw=%di8}~E1}n|(Cc>S^&#~740?S5y<0->0Q3$*??ur2a_D^}^nM+Bze9-L??Inb z=#ve7T0@_Upic<;295z;7_HD-7%h0|&ssWiaqM7Ly~Fk%IaxCKVs1|v?wNMNLs5F@K$^Cs>TNrx`#{LFl|A6X7P~8Nolc3rS)n2Gpp*j_+)1f*Os%-$^){$}9IBsz>gS+(2UNcV z)vrMHYf!x(s*gbR$58zRRDT84-$3;VsQv+}e}?M6AT+KSjLU>^tzcYh7#DzXl`yU| zjOzm9y2H3$FwTZ?cfh!NVBCE$?hzQb4aPkV0$YVx6`1JraRL``3)=?^snp=L1D427EEP%{#0CPB?qsF@Bm7edWUsHuaR zIZ(3@Y8FGyDyX>$YU-ipHmF$xHEW?}J=AQ3ntPz;eyG_3HBUgzQ&6)HYCeFPk3vv$ z62=4LoiIKP#^=EJHZXoLj2{Ezt6}^lFkZm_QEXq)T~JvUz<-C91aPE;7C;~*j&1C1 zbad=v9Y;T%acn?DKoWWtkUxqbDuN&i(sbw@MNkyGf(R&*P^9;k0D*)+isH9;xG(qK zz0Y3jKAg4A-uLOjJUyAGXY%xHo<7La$9cMlR#(yLZ?w9ZRxi+MD6NLkYAUVf(P{y$ z7Sif-T75~YuV|I{npQv2YAvm{&}tj4cF<}Ut@hArKdp|_s+d-%XjMk5-)Qv*tuLeX zeYAdz){oQrDO$Ipbr!9krFCaoccpbNTKA=Oe_9Wt^=q^qN$XK0TE9i>akL&!>q)ep zLFSdCrjRx#XmgS_r^%{9RyDHf zl2wna24poNt1($k$!bQ{C1hPj)|F&kP1dz!T~F2xWZh2Ion$>u)>CA)PLTBsS?$Pr zj;tKAx{%eKtX^dGCF@1929q^`tZ8J;AnR+gek5x(SqI5FPF4}mG~$_bp2^^uTY2VQ zp1GfA+Vf0Tp6SjrAMnhlJhO~v62H;5Ds7W!dlzjVr0qkr&7o~C+V-JsU)m0#?JKk$ zM%!0u`wnf#(RM0rXV7*wZRgQ;A#E4Y_6yp6P22BiyMneq(sm7Pf8*Id^X$KQb_j`Q zm+r1=-v>Qmf!L%D9q1`arjiTLr+I>vBrL3E!u#dJJH$FoX*qvIcBS0cMA*~w(rAp1{b*Csoa>;`0~ zk=>N+=47`d`+Tx5B>NJwFC+U(vaccgda`dM`)0ClCHoGt{~;m!Ua}t``(I@LAKCvV 
z`@dvANp>r;v&epy><(mSliiu@Ze;f)yARp@$bNzBL1Ygmdnnnjl0BU4kz|i1`)#tv zkv)Oz$z=bJ>}h1rOprZ??D=GWK=wyue@ynLWPeWfS7d)n_HwdUlD(SjpUGZF_Ik26 zk-dfN?PTvFdoS4s$UaQ=5wZ)(E+V^x>{7BT$UZ|(kW+=6L=rjG$vKCdTIAFvr#?B2 z$Z0}OGjh_&$t33jaxNz4QgW^!=W24UBj*NkZX)Lva&9N*E^_W6=YDb?B!_-)0RX|dvZFG(}|p}GB+1vgtB{E@SEPE?vH% z%PP98q04c)l+mSvt{2huD!N`n*C*-vEM42v^+meAM%UqVolDonbWMCr*9~;tLDyY$ zJwvx7x~0(VYP#J-x4+Zv8M@`rtux(5(QQ24Cem#=-F~LqFLXOfw^MX0rF%WPpGWuR zbibAE_tO1-x<5nr9J+U=`)ee+zeV@A>HY=XSI~VW-H+3~jP4cmXh4tV^hl@2J@j~( z9*@wY3qAVMqaQuS(c^#gm`aat=&_0(Yv@rxkCXH`MbA`vHl=4Xdfr0Md+2$egr3jR zvok%r(sLX=|3}ZM^juEQpXvDvJ&)6~jGh(r%B0t&^tzm0chT!XdObw1LG*f+Ua!+@ zKD|Ds*HU_Iq1Rq|?WflndMD94h2B@v`^E&lZ=(03^nRM&t?Au|-h=4<61^wVdpf;m z(t9nvf2H>pdKc6CH+r9;Pc!;lK%a}~^H2Kxhd%$MPe1w$rq2-iyhoo|^qE7S74%t4 zpLO&}l+fo4ebKiGeKYC%XZqev--qb?fAsB7-ARl3+v&TL=g;B! z`aIu|=l{m@ck=u{c)mT)cjfu+JU^1>$MO7lo?p%L>q$Jnk>`J-Usd`g(eHZt-AcdP z>GvG{y3?;G{l;=OtJH4_{Z`X&J^eP)AN^D4UxWVV)BiI1UqS!J=--C^&(ME3{okhl zSo(iU|JC&WiT+0=^gl)aQU;_m;9>^+g#nK;;AsZ5X21{zj9|c<4EUGx1FU;eGCA{znFKp(8J-o1wfz=pzE(2>P7#VI$qA`h^N&JJvJ!ji0i5?_+ zk$9KH6cW=&93pXoL@|SIX3$x{pnDkf8iU?q(Ax~!&7gb+9pS}Gc<~xuyp9)#@Ztzw ze3KWq@#217JjhGe@Y2n^bPF%N&P!u>=^b8L!%G`@X%mC%Gq@RpTQK+;2InxiGl{{o z8N7(Wiy2(X%fZW)dHFtGeuS6*&C6qWc@i(b$ICl;`4BJXGvv<A@?DdF2aU`HEM{8CIELRT=hw411DcPcv*9!{#$=A;U@-c7|8+>OXn)KfL;1UVWQa zC-dt2yt+BTt9y8LAFnmwwdTB*&TCKcT036rz-x1PZ85KX%xlMZ?KH2I@%rC*{Z3y0 z2d}@v>u>Vig{DL=wH>>kzP2PNh#G7q-vmI~Fn#ZUG zj4Ec-Z;U#_=tmg+6r)=)dOD*QF!}>VA7^wKqbqppa^CtYZ{5gSgL!K>Z;g=f)=u6! 
z#9R4{NoUN(jQI;=dNF1oV+JwibH*%Z%n!U>leg>fb_3pS&)Z#jyE|{Myn6-jUd6k;cy}Q04&vQayqm|n8yMe$ z@fR`v62>PwF}^qB`!aqp8NlTfuj7i@zX*rWtGHErFa+$Q5NoSax#^ekp|AonyG5N~2OumN6w=ww+ zCO^dFN0|H{CO^*Proc)FTD3K z?>)hLPkplHv1nf@F`p?PFy$kre9V+jnesVPzGBL^Oj*v9l}uU9l%JWhjw$PzvWY2M zn6jNIyO^?Kxm?=k?Qpl7drj#(HlqnTVIm6UoY89p?F||60spl}Y7E|jowLViD zF|`R(n=v(=shLc@l&P09^$ zW|lDX46`unLS|jbtgD&z5VIa<)|1RiJkP9`nDsKVrZZ~+vp!(f56t?7S-H$Q#;nuK zDr0sFW?#hYOPKu#v!7yiD`xj+_RGv3%Ix{f{+QWInVrY%ZOq=m?BAGEl{rbw$z;x@ zB<5VsoClfn7;_$HPG9D{$eh8OhBe2~S*SzN@Dx-4nJ zlJi({7fT*w$wMsZ$&wdXl3>Xcmds_ze3txtc4HGuHnZe+KCZ^c$$Wf2A3w^+|KZ~) zd_0$r=kxI~K0eLIWqficpZt?g{>3M6^2xh=GJ#JH^GOk(oMh=OEWL-N_p$U{mQG>m zG?o^!w3MZZaz4GBPyfoNH}YveJ{`=bL-_OuKK+GHbNTEXKC91X4f*UzK6{qW+Vj~$ zK3mFXpYd5K%YtQ0Zv4Jl(@nu84Y{8c;`SNML zY|od^@#QCc`88jD%U8*KRhzHsO8Dx(e3ivlZTV^uUoGRSFZimAuPgC&6~4ZSukYgP zyZO35U%$-PL;3m}zFx)GYxw3|zG=ueX?*iA-#o!LPw~zBd^4MG=JHJe-<;%|Q+#_Z z-~K(px3}_bAHE&Lw=ePSYQA01w;TEHJihxg-(AReL-}qb-;LtCU-@n~-|gl5i~0U) zzQ3052lD+ZeE%xnf5rDJ`F<748?!uv<>#}!6U%$Eyf4eo#*de;X8BL7sLzUKtZ2cC zXIasi6LRMeQ>aMJQp4I(Xy_(hQS-p`pSFz@Atht#rVo3c@zWjr^a?+{$xoyCX#+p);HO>u+>D&d)vh`2&9b zl%JRJa|LUwu(ld&Z)5F!tbKsB!&&<_YtQ!owVPSHhqe3oxL|#?$lF6$<-k->;Oa7AM*N<_X2r?$Qw-F&;)s}k~f^ZcgdST-emILCvP@+ zbIChEUNLzkGs$EK-lI(xU2P33I*olSr6>p%GQ5q|wQzy6nB zpWxT2{Q3dEF5=fE{Q3#Mel6kGZ`qv0=2SM`^pXUk=5xq>ZMv*lX0{GBbgvgH-FjA6?=3AVh; zmI-W`#+Dgu+0K@Hwj5zgAzMzcrJOClvGq>2KFrof*!mx~{+F#;Y;DWdnQUFm){oix zDO;DZ^?SCiU~2{2lG#?BZRfDs|vf4*pmzn8Vb_=J`kGy(?5@i0Bz9M4cTINJV|N2~ zw_|Vp}4eZ{;o=o;!$)2m(a~*sB%AVWU za|e6gVb4_dOlQw5_RL|=N9-}|%HG7i?EMdW|I6Md+50qm z+q3sM_I}FV73^Kf-qq~=iM<=y`z!m>*!O4lUC6#m*mo)Wu4CU{*?0DQKl=`{FQ0u! z*jK{7)9k;D{eNTs&FsIG#Qxjae?R;G$^N12e~bNZvws}>$FqMb`=_)25c^Bmf0m@2 z{l9UbDhHA{@HYF~jpxu*4$a`uY!1!i&_WI^ zKhnI5rGY)^p;T0VIk;7|99A3-eUpc&$!#g;1EFXUfN{#E2(Oa5QUzm5C{$^S3;?a1#yemC-alHZ5? 
ze&oME{!j_|uaZBU{E_63CVv9?)5xDs{%7QWPX2Q8SCYS){GZ8RNB$P_x0An%{JrEK zAit3OQVOb4kVHW(3hGi&pMpjdG@&4qf(t0Pn1V|a6kI{U4HVo?!M`YYn1UxLc$$JX z6ttzFJq2AU=s`hm3ZAE600l!R7(u}V3MNr7gM!%<%%fl-1&b(HM!}a9d_%$a6#PKJ z9*(4(T@5($caGdW_P5S)V`3slW^-gNM;3GB6OMewkuNy%HAjBr$Qq8U?)4k#Ibuhc0b1+<=A5!dxB$6bF2-=I&!QN$GUQ?2giDIY!t_qa%=_1 zj&bZXg+XBr3L8?GMqx_|&!_N03NMjRco~J)Q+OkVH&b{kg?CW+0ELfJ_$-CpDeOt% z3lt8ba4>~KDSVZ}(GP&k>wnG}9N;inX?pl~IH>nL1L;U)^VP`I7K1I;Ns zOyLm<3n?t3u!7^s9IwOiW@8(6i)M-2IDRL`@8q8goaoGncR2ALC+2bDV@`a>iREX_ z!k;;@juY!Sv56B~II))#2RLz<6Gu2v$ca*lf}-jaHK3>wMd=h}Qgi`D7gKa8Mb}Yu z14TDcbPGkdQ*=K?k5bf%q8y4kQ`Co|eiDjaplA?9gDDzL(MXC$Q}i}P<0$$cMe`~8 zn4<3}T29f=6s@CZJw=-++CtG@iVje8n4%*T6;f16aZp^H;sz8qqBx!6Oo}g{_~Hb` zmr{Hk#WzrV6UDbsd^^SWQ~W5!Pg9&taVLsoGc`9 zvXql&3!b4QD5*lpIg~V_B%PAWD7k`?8z{Mnl3OUboszpKd61HaDS4EV$0&J%lD3p| zqNFz^gDDw8$p}hDQ8I>-v6PIbWGW>yB$UjiWF938DOpO%_muoZ$rehsQL>+sLzEOy za*UD_l$2BQJEw4}GN-C>>Re7W;nbfwbv381<<#FfbsMMdU=1>J3hfOJsQy*~ZBTjwHsZTkzo>LW^PT}+w zoW6n6|KRk)oNmSGHk|Ir=}w&P%IO}Q?#Ajpj!s!xDpP{q{rRPvupVCH@Hlef`rRkJj zOzEYRUO}StYD%x8^cG6*rSuU>+fe!pr8$&#p|m@ty(sNV>5G)UOzA6>zDDUAl)gjh zR7w|6x{T5{>o3b>@no`!B zvX+#cPuXRZT}jzBlwD8Rjg;L%*#ngQo3dvpdzP}!ly#%5CuMyo>qpsO%7#+*DrLhd z8%fzX%BCbJn@8EFlr5v|d&+*GY!zicQT7XEnG}gQ$B+7v6TOh^0|~RrTjC>zoUEwAEmsEipo^f zqM{BJXG0|w=TXsuiVQ0LOvU9?Tt&sTRQ#2Szfo}~75}8-2`ZkVq8$~_QISJM7b?0_ z(VvQeRJ=&V%T&BV#hX;TOT`o_7EC@ zMVKcn5xy0^7YRQItAw9~UxXt5-DqLGN{A{K~PCgKYb8%1mu zu}#EI5qm`J7jZ~Lfrw)wPKY=u;GYh)T^wrF2m#Q&hS@RJvGH zx>QuULR7k1RJu-7xZYC?(3RaS~B8$^{&qRJLgWt*s4 zTU0$yR82G&Ra=UxnWF0BqH23l^*K>BM^x=Bs!kPEKM++HiKr=)FRC68Rf|Q{5>d5ORILzI&xmRvsx=kW&KK1#5Y;Xg)h-p)ZWq=5EfUpQi)vY- z+B2eBJ5lX9Q7uPQdtOv~LsWZTRGTWQO&8S`h-#mSYF~+Jt3|c-qS{eW?W9OLMZBI$aObfZYRStR{KB;6yD?iWc9ilnw8shvb5wHHYpMN%h`)Kw(CAd&`& zq?bg}NRc#3B#jYCV@1+?1BKZN4{GdpFSR_9xk{=VvPl)8UBDtMNeoiFkh~yVV@?eqtvPgbKB)=w--w?^; zMe-z(oOn+p&lAZDMDm9sd9g_TL?nMElD`wl%SG}^k-Se->hZjth!NO@AEJS`F_ZA402khFo_^F{SfMD_1Q^%bJ}>a(DqMfG)}`bJTGv#7pJR6j0iB#Rn<5jCzBHEtC(ZWlG~ 
z6*V3ZHU1@P{GX`txTx`zsL@)~ct+HCN7VRG)L12Izj?t@lK&S)$flQERcN zwM^9dTGU!EYHbv?c8XfNMXi0J)&WtgSkx+cXHYaw)EBiIiP{&4+82x3mx|g~h}u_+ z+SiHNH;CFdiQ4yy+V_jv4~p6kiQ12g+E0qwZ;9IPi`r8}?dhWSOi}x&s9i2<|0Zhx zA?k#vbCalZuc&jssPmwx^N^@BN+jw`7Ioeib*73s(?y+pQRlR%Qzq*CChD9Kb?*>$ z9};!{Pt<)()O|wKeOlCgM$~O5>OLpxb`f=Zi@FI>cbKUAwx~N+)SW2mz9;HV5p`#Z zx^qO``J(PeqV8uBQTIDhca5mKS=8Ms>h2PC_lmj)MBM^W_n4@ALexDiQi;?Qky>A* zHWaB%MQU@A+ES!mAW|>qP1;BJ}~0`k+XCM5O*lq&_ZETZz;xk@~Di%}$8a zo+9-Hk@}iQ9WGKwiPSM7b*xC8C{o`OsZ&JiT#>q1q<$k(zZ0n|Me1si`m;#Q6R8_T z>SmF;N2C^r)RQ9hl&Duj)ccdDS6kFe74;g3dTF9wQ&I11@2uBS)H`3)yHM1-R@A#* z)VopCyGhi$OVqnt)Egw~y&>w26!k`ndSgVrNuu6+qTVJ^Z=a}lK-4=d>J^B3Cq=ze zqW^=FCtb4C66qW;IC z{!&rDTr@}$4N^pdnxesXfRPUcuyi4Oc4#Hiv}}AgE^wXJkemGXs}8&*en|SE*drx4eu5WJBo%w zMZ+IN!(F1`VbQQaG(096o)8Vsh(<)C+M-daXw*P7N)wHmibl;vqn4u4`J&Nm$wiZp>h^C!I)2^av zLNt9zG#w(E4iimZ7fnZqrlUmDF{0^I(R8|KI!iR2E1E74O+OS(7mKE!h^E`cc_Gd_ zUz~ThIInv`oHtsW_o+DV8*$z$ao#3z-WGA*UUA+rabB@F?~G_hG)oc9YKms{M6(M; zv&%)ZJ4CYwMYArV*#OaOplCKqG#ewDjTOzti)NEVv-d@_siN5o(QLM8HcvE5d?lKF zE1E4A%~pzLt3|V)MYDCH*?Q4zlW4X@G%FF!tBB?eMe|EU^BYC;{}avIi{^tx^Kqj2 zLeYGwXueD||6DX*E}G|x=9@+Houc_6(Y!!3KO&l+350r$3*(GBE7vx?;z5j6X}CQ`WqsBq(~nv(%%;8<3#!dkv>bL&k^Z!MfyCE{;5b` zCel}l^q)lfFCslJA<{RB^vxoDn@Ha&(vOMs6C(YjNIxyo%SHO{q9xI?vS?XHw7gEV zyhpTbEm|f-%c-Jeu4q{xT9%5I<)Y>9B7?}NEHbKzjD{j3Q)K*EWLzXN{vt9Gmy3+6 zM8-`b<6e<*zsPt{WIQY~9u*mniHuevBS&O(78%_{Mo*E^M`ZL9883^BAtGb6$QUCs z#)^#bB4d)scwc197a0ph#v+liL}V-#8Oub*N|CWjBr@_u#zv8`S!8Sz89PPBA(2ra zGLDIi6C&fJ$oO4kCW*{cky&44HWr!ZiOd!v^L&wcp~$>MWL_mQZxNZdiOf4i=G`Lm zK9TuPk@?|^SiOjwtv%kn3C^CnL%wZz)b&)wjWG)t&+r{~n z#QEon^J|OqQ^olW#QACe-_n_f{yhg^{Ph~YOv56YWXa0={kh)n&-;r+lyhscT$9<% zIb?2QS*|a$%$P0b%rQqpRtj^4X{;43$0Bk>G$O|$LLv>%pHI(!ubzLO2g7&_4TcUw z!jLgc#Be-@6EIA}FayIW7*5A98^ie+w)8*D#V`-URTyrXL z468A`g5gaJ8!)_!;XMo=VE6>XXBfV~uo=Ve7)LN}gK;~IV=?Z8aU90oFpkH#g?~H% z<0OoeF;2%g6XR(Z&%}5(#&a>AkMSammtvfcaRJ7K7#Cr@1>+Kow_&^!<1&n^Fs{S6 z5#y&Af5vJTtZJmMCCDzwqeI3>}V0|Oji?F^M>!-0^gY`zNf52u8Hsi3_2b;;* 
zoQ%zR*j$6na%}F$=0R*8!RFB+HjiWT5jJ09vk9B;u=x?2pRoA_o4>FfjqMn0cffX6 zZ1=?W5Ns!6dlt4AVtWa;mtlJ~wnJ>^W4jpJ<=Ebb?Fwuk#cYq4FwCHD!oo3I;; z-Okt@jNKIMj>c{pb~CU$1-sL+n}yv4*j9PH*|HxIk(vAY$!W!SC7?oI4AVD~n5 z8?pNayI-;U4bv!0BbY{G8iT{M1Ew7@?T%>=OcOBeg(+fcFdc+x5~iauO~*6`(-713 zm=<7Ki0Nibi!nWc>3K{qVtN_VtC(KHv<}mUm^NYh8q>F!zQ?qgq3LH#e`5Lz^C-+C zm`7vY1@kz}yJOx1^90NhbAvfyJ`nQ}n2*Ce74u1$PsV&I=2@7}!8{xDT+G*Ez6JBG zn3rH)iur!bD~6aK#=H{qIP7=BemwR)_WNUhB=%=vKfD(EJFveS`+Kmz5Bn9^zkvP6 z*nf@v57_^KWfYbXETgfE!LmJ;30U^RQekPaWGn^C0ay;iau}A0SSDjR3d;#treT?Z zWfl(0Iap?6nS*66mU&p_V_AyjHY|5yS%&2)EYDzh7Rw7*Uc#~t%X%ztVA+7>eJp?D sU@IIX9PEdKnK(ED2Mckq90x0Ku=2mxx~)h3J9Z7b9{!*4zZ^XF4;-8`egFUf delta 37991 zcmaHx2Y3|K_x?Y-nOPvo?oQu?bP^KCW=SE00D;hZ@1&4KKty`8&~~pCL=hb|U;#qM zf(Z74Aa+C%6%iH1f?dEaD*T__730_E$DcgpnK|cu-*fK0=ia$@CWkh;?!VKu(v*Ck z_&&WQv9;sMhkC4*(?sm@*c)SSid_+VbL`64+hRAyZi?L;yCrsO?6%l@<7VvoL%!M( ze8l~@`)N;ePpT)w)6&z*ljq6zwDpvDI(j;Lx_Nqf`g#UgR z?s?kttY@F+MbE3A1D-cMZ+nh--uHau`6TE$;rYt*o#&M2C(o~*KRjo>POt23=8g9% zUft{SHut7_GrTRmt-N{Od~aKCiMOM-v$vbKr?;YAtduzN? 
zz0q-vyg_fsE4(*(S9ovsuJqpRy~n%8yVkqT+u&XA-QW#A=-uIc z$h*_K+xx6{k9V*4fcJIp8{UK754<0Gk9j}xe&hYt`H-S4x!*N=K!WQl|7(`YHXD0m?vSkTO^qqEsqXO0_axsZnZ` zI^`l|hB8x`rOZ|q2bCpCPzfnFDmN)Bl$({il)IIClr_p)Wu4NXtXH-x4=Ov9hm@Vl zF6Ck65oNFPoU%`OUOA||sT@+?Qa(~XR*oy5DBmk5l~c-TdZLTJ% z$!dz)TFq7cYMvTU+o<_!C$&uNtaefRs{_>FKy{EhPOVTY)hhK8b&7haI#r#fPFH8B zGu4IaB6YF4M7=>>uHLBLq^?%)RPR#nR=29#)O*$Y)W_7v)hE;^)fd&5)R)y))OXaw z>bvR@^)vNz^@RF``lI@j`m_3r=G0;|NmDdc(==U6(=xSeEmsR@g<5e?E7dw_-L&pn zZ*8D9NE@z=)y8Sn+C|y~ZK8ImHdULc&C}*ZkNG`XBnA`Z>dED28fiK|?nTBf&^Al8qE2!)R-?H#!v@ zw;T5uYmBwV{l*Ssm+_eKq_NxBYrJ5*YGCnqr8=n}#PmRxv&yBB)zl^iS zITO=iI?WhUGG)_c#+#~{Xf`)nn3-mKv()Thb~HPgWoBoyi&<_CGl!ca%#r3ObF?|e z9BWpZEcPgqY{Pgze}&se*yXRSTf zi`Gll0qb?^i1nUz)Oz3g$U1HPVEt(QWc_UYV*P6UX8rDq@kzc|pU0>9OkaX8(U;;& z_htE7`HFq*d?mj2zEWQYUuR!8Uw7Y7-!R{B-w5AG->B}sN?(=lBHskx6yFTrWxmUO zSNK9c;k(Lro$p59Exy&hJAHTgHuyIBHu)a#J?4AHx6k*Y?-k!`z5~8DeTRKVeaGxN zcQ?l^_6GNAtAmisgxpQYHA1cx@=_r=gyh&Y+%wG4WUY|i64Fn*?)5HP?pR~bGA-Mi z_M6@P?q7PcPB{06YK8N)P^-*tk5&x|9seozEt(Na99HbrvDb&nHpE^Ndu{A>!g)YA zUl-0dHpDKCT^4(Ta2^!SH-+<%z5kxNp|^%=8e(nXe5;XsN9;YJvdyvev8!TN$KDxx zSM1%w`L=MrBbDc9A1SIN#s($-1u2W`}6T#Y~QyW#5wLcgD@N#YGv;xXbM0d6~|*EA8^> z`OdgS_N?hS&bXjm8))f_yUN}(y`?kmT6=q-&>6SXJ|4(*#x1wqZE~D(E9{vw+B)NI zu^ZY9bjICgpKR048CP%Lw5i;2r+r{kz;TcL$EG63I=jv0HjWK;_2v$a&2~XSfirHK z{mSMndq+Wv9lOQtc+h^Rpp`Rjr+sWzjx+8Nd+L^Uj>qkWEo~f6*$1}-9J}qaTiQDI z+ON;fwhOi<+sd4HXWUD6>YQ9>+^hEXtr?C3_Q9>$j)V5fqC)%Ut%hB;E!}b0uHKgI zc+Wm{Q9+Zq4?>AGN`ielQMX^;=C;?iZRL#n#BP|Ix}o>YwvI2(CqP-r90zIJ9Jst^IA!fBi=q&lIe_3wQC>n z+e6y7am3q8+WQ^x_Py=19P#!W?X#Wn0sD>11J3vY`|RZ{9r5p$E1^G?l$AqQQZAVATPG1mfKijd1Grq!Jw;-J9A03Mw z@peI{md^MIcK*TwXZ*!>-NLZv=}zsO@zd=5ow<&gc4)C-&)Avnm}B3(v&b>u9>1uK zy=SM(zIBn`KDExj2M=$vQ!9=7b%E{Ac>?T@+?IpgoPyOpQeGam8SUCRT``1SUwhZF7EpxPvU zQ@9Bk_AN`A+fSBzobg-jv=`It^lMCe-PN*P{;27=&psU->5SiQ&%QqFu_SmO`1-Cva z*>`qNcgF9t%b!cLm5^$G+uiGof6;a%MMb7RVLD#18+t|W7WVKt<6pBgc1Lg5U9C9d z-}om{PtQbW{2|+M%`j*DJNB#zQP7T_x-4HKC6q-d@*hpfmn6 zyRdg_JG9JYAM7RD3(>)yD*^Z|Ic0r#w`&4h$8Gp(y 
zf2OD7M_YY1U=O;!g){yad-nDHo$b8{ueei(zT@%{LY~-Ffz`+ryWB+kI2A z)17bcSy5zH-<0Zf7um=64R^G&#jqkfcW|21T^bpz^d#M0JlO4Ycd}2tQf%)Tob7aX zvG=SP>U4Lrm#l7WuYJMq=xK+ZOS9JvGwic3q&xc9X)g}83x|$#x(7w7Xx*^HTVK*0 zL+u?a2Rhv&?6WJ!Iox(=*l?$NoUOb(#8G9}zFh05vG=|_)NzsRcq+{vy4tk&43=!q z2-V@X(?^s$+;;7V;ZFBd`_wCyjv01g@3cs6OI}qRvu!b~)LuR^(dnLNuUl1Q@3>8O zx-Yktmy7JQQOQpC0{iUB*7mvKarOiIvmJ}=S+`&8aNFlbO>nxew%5Hj$X@%JY(F}B zu+zQFUU{I{?l>mF?s}lauB}gSxb229{haPwZAVfE`>%Sh(|xvT8Rg{ucR-5c#Ct4BE9TkI7g+((J_%!8@+u!;c= zw|!4V`T56eS$VSj61(_fyFI*sbGJ?L}|KrakF?Dpa;^-9Jx3_ip#M zp|S^+fcshZ9`{~*?VBTmB4(Y4SubKXikQtJW~+#~SH#>eVz!Hz9U^9@h z{e$~QyZ+F?polpmq!=N^3CSxYT}VknN)uA1kaC3N7g8G`6$+_XNS%b#MM%AcG+0QZ zg;XJ=@j|*tNS6v}j*u=B(gGnZ5zA#cbwb)Ir2B=mLr4z` z>2V=FEu=j{dS2v8FAM24A-xgq!dmLM$dk;(P;(K}5aRFd_N0gBWT}12J9+lew=6v~ z3fRyH$g#_hmfHspyX?MiyAyJw{!NYkHerFG^$y+9(cbcQx?UJXY-vQavv+@7Xve*y z>!nfOwnkr>UHDF4y*z5auhHHk%&C9fbZCaBchvhpqql$kAj?r`8;8||K~d*}jm}}= zgxW`1H07|VkBkByY6Oh6Z#g`)X=T*D%WinL$WD53nvj2fPbz?UCsq0fsNo@oqpg}vvEuet#5jpb@ac{^-LZ`{||(XI(yX&a{bbE@`ajm!rUU8iB7xi{s)CRsD^q^W8@0TVZFBed=R5^a=6%@d@MQPv#9UGM&Fl>J^RuzP5&ke_^1(Z((d+A z&qU7;QOEH{$1h>#nCKij@=>1ru?gmy?-w(CmYc@(Q)eWsd|1@Io;@ttSW<|!8U)IlJNJc z@?#^ST{xZc$TV;AS<3mP<>y8~S!CJC{#-x5y!_he>v6t%lQZYfn=eGOvL;lV*V5{e~Fm08@(gEBMEp%i$L`Hs{_u?@BBJTw6MDHXaNkWo^skhB^P|ARO_& zN5K2OkXi|;b%c{^-}-e{(0kl_;)2P2;r%i?a(O}uL`boj-5wnif7gch$46S&wzzol zKie4ZN$*b=l=|8GOH?XfNCk~jcDrwU1>V0D=Y@z$j3PyeMM7#DCEAO=NzRW`lu+45 zC0=nW9>pu9c0wuLH|_5l*j2br1b6C%=o*Bg`vT zx+>k2?n3G#q`pGxC#3!xm7Yp30!kkt4G_}62yak?m+^ge{xD^9m^WM*p^Q{U32BIs zh6-tzkcMwm#wcS6C>25)A*7KJ-lz!g^!Hi8iOQ4kj4mUY=k#1T&)`l zZ|J+ZdSPLQ_Jtkt(hG|_bXZ)mu&AJc()Jfmwa(wDYzy-?DVvoo%2pvw71A^zO&8LPjmo{s zeFT&Tgfvq~vm(6NcIxT%`Hv}2UogLCl-&fBJwlo*qLZ{g3h4$REsw;x(LV8GtNb)ID^#{oO;Pt|>2@LA5#iO_($B5(yQ;k| z$my;2A)xjX(rO{y8R6VzKlpR2{2}VdFz5d&adnhBnt(c1NcRY7O@y=7cKzbdAFoab zx2Z<0RqND?gw!CU^+MVpq>UTZiRvT*>SQ5p64K@fXG>%iYWr*J{MqVd7mRhedIbS> zfsnQd>D~zMK70AEt%D);+6%VnI`w)2>M|icAf)XP&V%9Ey(UocP?ySug(W57xm{4S zcO?dlx_)Kx-yNJvd~Ms#=C?SIS3U!!igAZeqziGaFANRJ5V 
z(Fo@;`=;Og`46Z&FPQ%>^D0rdqTJtL&u z5z@1Er{A**)cxwg3ugDGdWeAfwvhG;>A47LpMCT1*+KQF`q2d=eXJfQpnfW(7lia; zg!EG6L3mT&&+V#(g@q->dFh1(B_)f;Ei7zTRQ$gk{FVCs1%*$lrwFJ&2r2ayA-x(= z-X9*g)~#ZX*|DZ9seCDu6~NcQDzsMja55q<)N|$t&5O8Y|y$2 z=~%tP<;YCbdWLEmv|d8`=xx5H8Q*Gsw7yzDEyKS4Y)aC{LOLF9Ms;{Brj)d4deb2; zWvVt<8)83uHbWk&4P&PL?AbK?TvD2*jnqbmV~i5grw!T|A$=B!k)>6JV^j(0^TrtC zwSHPH*+Tjv65@ot=-g`cV(pSJf3lFiYS5+#>FdAw)5H83Li(nWKTGSU%?b0ri}1g_ zeY#_~dWE(i%)e4dCmXbdLOK=Ux7LDTen?2C8~Imh{j_Vs{2wFyA41iQ!xQPXA;BB8 z`cT>Z+H&ni?Ivx7cC)rpyG6TIv$fl_+qFA{^s|tD71HlQ`a?*63F(}W9YT%~vMl5# zLT)DHxcjwL+G_1i?Jn(Z?H+B7wpLrGHE8R#4MKJcSrM{N$Voy@6LO}IbA%iaa)FQw zh1^!i#aiPiZ;p=gFZH)b4!yq0=?FB_?u*)fi`q6tY&)X1Ghv%if70p5Yo|sj zDCD~cL~pfrM*Bm%ufB)N(ZVo3=1rM7V}ssIk7edRe*0uneo%Mo-ha~86(J{t-_gZ! zLf1nz4Z0!Z#Q)^2Cuo0YJ3=%Sa`Q;qXa1dbijYmENw`cRUm*2#y~RI$r)LT|`Mi8~ zSUx8xh%; zJ?rMx3E32~UvVCynLb^gq0byMcto3t5l3G4U=yN+Gh3fiJ$S^R`Sa>7nl>p++n~?Z z=Q4Bj;1S`w#*8o1uW0O$D}~(Ve8P*u2`?6M{(ln=cgQRM>JX_>>^i@`^q=NOrdFfx zMtw!7tUqENt=|=<-6`aD4f@?eE(uS=tTF4t*)<5c z{eQFDr2V0N7|E_Q(w*Vj`ezc}8y>F4@9Fvj`h#Kd?LzL@pzjcJr~0+Aj*Mje5&iK{ z*?Rp^{V^eT7IK&M`V;z-LM|6_*ZL1)9d+6IUj2no*#`YNeV_ikkh=@Hhmd=2&|lPF zid`n;UP2x(oNv}oh;z&spuZmKv{8RUKd8T{9};qJArBJrFd>g>?3;J=_rhw2^>_6n zLhd8vzC!M|K|iX$uYVxq{z4ugRfofUH4W1ZcOOPyUEM$=H) zX8o`cV@UcD!(}vy=ES-y z)9dD?cj#DLnm%-p@^!R2_Zn%TP7QcNG5_?Tk!kotWm}9aqot9p-C?vcS{u1St`qVE zAx{!=+z=s87V;D!Pu*hV83Ch>k#7_jg+`H(rwe(GkS`bVLLn~^@>N2y6RI7$MIP^33(dIHN+y zvxGdm{sG<5I;qZ>7zSKqOc3&1As5RHru}_!z5p6R zo$fW(yWe$xV{9@uYZjxlp3B|GjQfPVEb<}lHz6+<@=Zc|JCqaNNW~lz^39FIx;@(A zH#83G&i@@&J-VxrZ)`+7VJ|w~%P^jfQdcxO_B4KIGWJEiD;vEp{r&06T^V_X8m}5} zh03-X`;FI(1IFvd8^%H7P2-S|ZxymFHa0>(SWVFGxKLu486 zhYt_(okG4#NRCj9kXMHkA>aMjgFZ*8;}YYGP)$QvZ1ws6wTy3!pThCJHNG>xH%=O- zjMK&s#*ae2N62f0yjIBTgxnzH^+MhtHM_|5p;I7624XQ(Xs%17cV?$*Pv15hv*#?0mUdY=ryDmuo-&!$~&9p{z@NalV1b$y; z*O~tdx6GDin^4(SGuzBDTbZrRT+?snnE@d`Amr^reo)9eg#3_@cM5rzkRRS^=9>j( zp;=_MHH*!5W{Hp=5%RM_J|N`Rh5VV2zY_9K!sV?|QpQf0IkA3pnnSP8OLBZWsGHd@ zRJO(JZuT&Hn!U{4W*@V!kRKKDV?ush$WI9QNg+QaU11VTz=IG+hA 
zp%Nj7Pu4X`X1ycDq1RuL?09cVMX1xd^F3G{>eRSDHEYdF!V&Ati_8h;M01jPu{l}D zdxX4K$j=FRpOBvy@(V(KaSPc5Vkeu^%<1L~b7rVI95LL#FA4c&A-^i*{o$A&r#P(o zgDH+5hhIrE$6#|o)!-4s>t;-z)MMuS8S{kvN@S5X7nzGI{@#&{m^5#_xrCYjx>p>B z%qU$5`L!tHYV(@f_(A-^HygF=2&$cKddmXO~T@;gF4EaZ2$5isvC>&;aJ%sb7y2$=Uodh&>n z-xG3ll6)ZKPa?)oYm|h3J$v<1(2jLG%ir#W5>n%m5Kg?w~fG}rsh z2de(rHjS7xZ-aTixt*D#29FpqXzZLR#8_kain4g%R3it$opN$KQ^NNH2?aaKftiC znuf}@SPsi+#aNOhTP~}Kkbf5PFGBuR$iE5scOjn<@*i8QW(2G_E1m!W%PZtRBkBGn zT(0niyyGaT|1i^$5NvKGhe{i)Bq5(|uu_D4?*G#QE5phRl{Hu` zgo_3%OSl~4rp%Z(Y0kxU6B?UowX$-cSO9^-s;epSx4b&JXBeot?+x3zw_uSTusljEUSmrH&nLKYHbx+y{$gN z_)37@-r}Ekt4t3D#i|-Gjrnn3G?QT8$M}nB+t2xV#BPFp|bT>ku@TM zjt^gk(-kgx^pxp!lN+(|R)tm7m~ORjdFtDfzenm^ilGNGO+n7n!6t3p=>#`l$Nw-?J{|9h~RWDr0 z!j)2gJlm0;Y~5|G{SRWD)gWAH!j)d1lH+JO+S+VA7%JOpZLzjm+pK%7`>gw|2dwSF z)k3&3g)2+AS_)UTaODVBE8%Lrm4NjSS!7!eTaQ?eT8~9KBR8aks)cKqa7_@diNZCl zzCOpXS+kzAo(~WAKH>5=ST6`yUj2|(j>2BnE7tz7{Z-)#G+3_*SDTT8>ZZ(SZ1_Rz z?NI3k>rLyB^_Fnu3s-?~6>hNJA=`RaxQfEDhSa~%$}yqN`Y=?}U>y^#w&&TFb=>+Y zJnufSKD9oxKDSO-UszuXSFv!l6Rr~BYA;-+!qq{zI&QJPw!X2xwZ5|otdrI$;p!w@ zWx~~0xCRQBBwT}pYjFLt){gr$>x}h>^(W1&zpS&?IUhbpeeYbysMwyu)kU~E3s;}| zO}UP;5?82ZgRhCNsjr!El?zul;p%>fX1+LIyj4)qZ{nmG^QK%pWzrns>MC4PWFhu> zeM)_X-;oicGBbRUHsCy?bp#eZ{)Pp7zQ}Elf6v^=J4Pn@l6=V_gsZo3^$Hi(#Z304 z{j~l$$yxy0|%Y$#m*aNvTU{l?FkR5!6^#-w?d&zv*0RqGlh+s;#xgV8mn z$IOY5l_+u&@kReu!qqFfvy9_}uRK)K;Oi<}Lu(XcVfZt6!Q#C1h3!j=^U_O7+AnVW zrs(VG>mMrH+2_6!-Z>vaE%nMQNlHPlX=KD&^O3o_=X7iOW|rZM!3d> z#tTdH0H2exF(&?VMREHn|&*V>tf-W(wKwoyW>A0>V2z( zYqD@%GA_Jdm^fwL%sH7|7p(i=U8wIK-?~t#_3urqufexoxGoj0sr4!Oj%3BR*|&ve zzOBA(O!VF7yT86)zGGne^l&|1Azbqh`?mWY^zHCHL^Iz`-!8{s-~EmuzDMdG$#)D1 z&JeEo!gYCwcN!;{?{VJ~;R!~zaLp60nbGMLs*bksgzwq#6x-w5>w8YPW(n79;hJ-P z!W5jJSJ4^wpZT*t;+^}i`7`qWnn3@PFcTev!~cDc_}&Q-Zqwz$b=mn;j)aSHn{Z8! 
z&Y<^wAJiy0^#>9iiS?%n9A2mI!+J-dqfqjF`@DWl zp(7{Q4&0r<-51F=&x8AYaDNEy zkHCE#+@FH`b8vqF?ytc84YGB3(94nTnWk|P?mvmJ1F;nvKEvEP&R;ae-M;ML3te1rl6{zCWG1%)b^lu z0ksdPLqQz}Y7M9pLA?~z%R#*o)Pg8DmX-9Q@* z+ECDjgBBbKS~X}jpgjQElb}5f+HTPHfc6S#`$4xr&jh_C=sBRb2E7RMV$iPw{btZ_ z0o?}ucF@;=z7F)$pq~W;Fq~jWV7S5Xf-xL|MhzHsU`zvJ1{kxzm;=TC6G`G2^}G!3=+CPLRUz*7!t06gl&-U z1|*z>L<162ATbRRiy^TD5=$YmBP5nVViyP|c7??5kk}IvdqZMhNE`-koW^6{shfcXxaZkZeJ60wgzw8NNIqS&5*JcQtpM6U6ArLr0j;2y^yjG zQr?D?!;o?WQjS8(2auW%silzG3sU<)>Oe>x45>pQbrhtIfz)x3S_i39AoXfUT?xU| zTOrkk)Y~An0a7UK!o0jWD7^q4seeIQD@Ypy zY1cv8qmXt8(msN;D?iH6r_)b^r?_O9n$AO`aDR#4AK`u`eH~ALi$oj-w5dsK>CA_z7x_P zhV&O8{Uu0$1=7RwKm7n?n2?bQ8GgtJKt?fSlt4x)WORg#ZjjLfGI~KqAIKOC8FL}y zX2`f5GS)!GI>^`n8Ji$uCuBSf8G9jPA7s1$881P`E0D1tGQu&BLPqcd$T$WWA4A3` zkntI0oPZYb(4rNz7y>QkLyOy?#goutAGCM@S{#NJN1(+~Xz>BGI0h{~h8CYdi_f4% zSmPwLI1QO@$h06c0W!-WvoB=!hs?@E`Le^Bsx&pEmK-NOYS_E0kAZs~f-2qvvAnQ)Zx*M|AK-N0Q zx(~7*AV}7Oko6E`?SiaFA?pORRH0>8XgL#Fu7#FQLCcq+<*U&00JMAqS{{OyZ$ry3 zq2+04`6IOa4O*UomVZKaGsupEYz4A4$TlE55wepYI|Z`+kliH+*?k~;BxH|<>`KV4 zhU^;1o(S0&L-r+*JsYxbg6umWdlh8g1=;sN_IAkL0ogkt`(em_6mn$9@jy-z^lRBIHbioTZR+3*^|4a|h(Cf}Bl|vjuVXcNpZ3fZS1#I|g#cL2f1FRzq$LaA{sn%(9|L|D_?v-09{gVLYv4B_ z=uZHD68KZWp8@_X@aKR(7yJS67l6Ml_)Eaw0sLj)F9&~j@b?0LU+@nA|6uSB1OG_y zj{$!L_^ZKR3;qeez&{QAGr>Ox{PPLozXJRV!M_Cj0{qv2|9bG>0REf6zY_d5 z`0oJ!YVh9;{TrG4LM;|7YO;0{mZt|2y!X0{@TT{{{TNga1$PpMyLnuz0fk1e#8>oapH3Y7Nz*P{q1_Jj(;1LKs27x0GI1YhNp^XFD#6p{RXwx3r zbcHtEq0KaCGauSq4sD)3()2?v^fK9{)GHO$nOaGWsqM3`IkWcr3A^p74q+b z{CgmOFXX=p`L99#A5b7cfeQ)(P*4H|rBE;&3M!$X8VVLc!L?9uJrrz)f(N1CAt-nQ z3XVX*Q7Dw5&;x}E6c$5aXDBR(!i%7AY7h#iL*X4zxE2ZO2J20^<;(C%7jcRjS*2JLo2yN99OVQBXev^x&%&Ok{Fl*mxh21?pPNe3ty1|=0x zQUxV*p=1%1EP;|$P|^S;8=z!AK}z0+lEYAP5=wr9k~7dg3EF2t`)p|6AKH(A_M@Qv zWzaqd?FF>I3)*jl_M4&obI^W2v_AmtPec2##-C7{0HqmFnhB*np>z@g zR|TPT6O?X;(j8EG5K7;J()Xc5Gw7f}2Ln2kLWge9p$BxB03D`5hZ)deIdr%UI@|#r zUV;t>p~E5Q@Ede=K*t#9SPUIIL&tLHI0-t=fR3}EqYWK{cSFZD(D5bccn~@sf{uSe zCmA|5fle);lOH+-pwmd`R1KYKpwm+5bPIH{q0?^Y^b&M>1v-5Noqm8$KS5b4l;uEK 
zYbYBAWff3X1!V%tZh*2IAy~E(%ASIq**E{4u?q4Of>yaYOL zh0YH_=UveG9q4=vI)4mZoX{l>y11cBE9g=PUD`sILC|G1bQufbokf?W(B%f`avyYg zm>^vqg)YaS%NNk)D=3eLaudpZP~H*Bdq8&?*h7U=pRbUguGzl3hBpj#nyYYW{(5W3v}-EM?#N1)qr==Lde zFNE$Lp?ev0zaF};gzmRO_m`pjo6!9&=%GQ6BJ!+uGCD7wi=&=cUY=<5@pvO

#O@m(Zq1WZm>j~)f z9Q1k~di@E#W$4`mdY41*zR!9GtwpQoVDFVN>4^mRbrA<%a$^sRut>!9y8=zAaZJq>-&K;J*1Uw`N~0{V@D zez!osJE7m*(C;1ScMSS{4E=TJpA7v|q5nAOe-ZSb2>rJ}{~geOCqepu4gG(F{y)Qj zQW($;2K0adSHplCVZaI)@Hz~57Y4is1HCXX0R}dQffHchG#EGo20jV{pM`;YVNfaz z%7HoC}Y!D&GloB@N!z~EXKd=U)Z1%sc4!MkCI3`0CH zM1dg{Fk}J@nFK?&!;r^d$P+N+FBs~Ap-o}v7#La$Lob4%8(`@DFmyW%Jq<(8z|cQo z*gzOI3Wkk=VZpU9Y%2`A7lxgLVZXt!GcbH83?B!>D`9v848IqK-w(sTh2cNL@Lyp> z9*k%QBih4=AdFZFBW{2Z@56}CV8jU+nF=FwU}S3;IU7bUfRT$}wBiW1fSt4j3B?W8-0LUxJJs24hFSSR2OP4P)29*rPD^QyBX> zjLU*?0T`DL<7UIS1u$+AjC&Ery#eFigo+fX$cBnmP%#B6=0L?fsMrA&&p^eqP_YLp z_Cm$$P;n3{-iL}0gHZ7iR2+wjPod%jRGfy2pP=Gbs5k=^e?cWsSpby-p>iBlRzl@? zsH}y`2~c@4RL+FT*-&{IR9*>{i=c8DR4#|gl~8#rRNe-atD*8Ps9Xb;!7WhvFjPJY zl}|zCGf?>~R6Y-tFGA%jPk*qP&FBWbx^eos+L353aGjrs#ZbOT~M_Kf>oQLYClxH168M?>I_u<303Ey+6mQOs8*pm z6{^#rx&>5cL3K7%w}R?isBRC{9ih52RF^|_H>mCb)xDs4FjNnP>Pb+22~KRZy z8>;6Kr22BGUI5jLp*jTBS3~uUP`v`GH$(Md1A&8tvz0BR0G&0A0# z1GQ;Tn+LURptcZd+d^##)CN01ZFi{c3AO#8b|BOaf!Zpl9S^k=pmq|}PKMfPP&)%^ zXG84*sJ$L)mqG0cs9g!Qw?b__)UJlwyPPA6b71WJ~x>~540Ckr_-E^p%1$A?wZWYw6fx0K5ZXeYBKZ@-; zx(oVx>z4>!|*$wOZMl1eFb2 zj?$ro4#(+mf{y5zB%xzx59qjvj?3xz9UYI-v67BIC+L($rd=s7Tr1RhDd>5S`qVvOa{x6*$ zp>r0UbLiZU&d<>KxnKPEbRI(IVRTN6qVrpHeuvKE=$uFA@pN8G=hJk#j4lt+WdvP5 zrpp|<%%jT!x_m*GC3IOzm-TenL6=>0*-w`ux*VcQ6s@rco38iK^+CEmMA!e)wF_Mb({&_WU!m*kbbXVq6X-gLu9N9H zjjl84dY*1I>6S*fCUk2_w+yzNpzb`xA}DYoNkNg_9fkx(QP^1R?uxd-8Rx~3*ENSZ3o?U(`_%0SL5-PJbo*W zXY=?d9{()C<6rUkG9Le$$A9GU)nwgA)&peyo2-Y)dX%gVWOXL18(CRobtmg-vU-u# zm#hI~Jxf-CtQW|7k*wily-e0?WQ`*0EwbJrYaCg5zi7vkHG!;2WKAJ!I$5*GnnTun zvOXtk5m{f7wT!IgWUU};C0V~Dx|XbUWNjpC3t8LA+C|o0vI@y6ChIU+$H+QPRykQ! 
zWSt`GEQxGnCy`y9>{?{kAv=}qhGeIa-IVO+WVa-{HQ8;+Zcp}QWM4t{m1JK{_8-Xp zBiT2Q{b#aoCi_;h?;!i{WdD=wd&$0^><7vI5801M$bO9MPGol_`w6mh$bO3Ko@DnS zyFb|j$sR=ZV6umj{Sw(D$$pjWH^?4M_879qlATNT2V{Rl_QzyTCVLv$Gs&J!_B^r| zko^VOOA=%+CHot)za#qxvR9G4hU@~eH;}!V>}_Q4Bzq6p`^hdM`w-bj$u1@P1lg5j zpCtPX+2_frMotPjHOZ+>PCarOkkg2qCge0DC((kO4076#(~g`=$+?`I-;i?^Ilm|8 zT5_%@=TGGPg`8W+xt*N9k@F97?jh%2H5^Ef%#DlbpB7d7qr|i z(dL$0gql_Nq z^h~2?OL}I|^BQ{IM9-V)`6xZR(envA zq1RvObr-$o~nm(7O@6ThO}|z5ht> zo9TTEy>sZ@hu;0@J(k`d(R(7jm(zPSz1PtDBz>yUCz(FY=+lNi7t`l{`aD9P|Iuf# zggztb^9p@t(q{pE7SgAHKHKQCgTB@2n@ZmX^u3h6SJC%(^!+=1@2Bto(f4Wk4xsNq z`c9zlbo$Pu?+W^^rEdX!Ptf-aeb3P^lYYOV-xUe^{gZwV((fVq^`hUi^czIKarB!& zzmMtn1N{oJ zK^HUV*9^LnK@T$MF$Q&H&=3Z_!l2g}^cjP`V$d=M?Pt(Y29=O#NTNBvOy0yV7pcVU zB<>{9okU*}{Yfk)@hyoJJl~M#oAZ1No`00*yYc)JJU^f3zvTI)494JE46e=KhZx+E z!JQfWDT5a>co8qu;)RC1(1;iQ#|w}1LL!S7#`D4yUYN!Uhj`%xFH|t(8iw4&keeCu z97BdNWH>`sF=PWnHZimnLoZ?IWek0sp*!146Ed&8+hqf zUb>x^hV#-WUK-6yKk?EgUfRO&_6)z0;a4%dKf?zze29eMKQO$2;p-WZ$%tPu;tEDQ z!HAxW=*@^3jQETZpEKexBgz?3$;jU_@;XM|z{mt6hcj{{BfnK(j# zFR%WKSKs5+iM%?ASC8>(6|bJ;wcqgCwY+v6uf5D`Z}Hj~UfaQI2Y9WR*ArLq`t`hi zBd-tR_1AfQ6t8dK^_{%Fn>T*L8`tv2b-XcIz2vhEe?( z^*p0qVAKjmtz}dJZ(hQizva!Vd2;}7zChy5p}e`AH&^rK8b&8Gx-O$r8T}xmA7gY! 
zM$cvRVn%<-=t@SP=dIwaJ9+Cq-nyT+-r%jVy!9S$?dGk6ymgqjujlPsc>6Zqp1|AF zd3z>fYDgGUpD_&?^8jNWWz1uYnZlSkjG4!nD&E06)p+Mt-uWl*+`~Js^Uk}xGmdw* z@XlV|+0VNfyn89{{)%_AdAB$3_T}9&8^yTMjGM=}WsLiVamyL^E#nFpx1Mpk7`K;kg^Vj^ z++oHYW86u`onhR0-b+;Dy%gT7$$QuGUMJprllPYK-f?p4lG}{jbaGpfn@R4)Meg0?-be2L$n8LGS8|^qH;3FFc4wd5Wlw}RX%a!-+amb`l8H6ZUo@*0!Zl)TpD{hGWh$-9QU>&Uy2 zyqn0ooxH!1_Yd;!A@4y6d5@AeguD;Pn?l|k^5&7ZfV?lrTS49r9NvGE_vi5bCf?u8`+F0N@5cBn#&>7@ z(~R%M_`ZxE!1!kw|BL1Y#=pq;;f#No@vkv{6yx7w{5y;v$M`(Pk7xV@#!q7W6vj_y z{4B=LVf=i?f6n+tjQ^7H%NT$0a>lP<{7S~JX8c;luVeg1#&2Q#cE;~o(DlF3ty*F( zAI;~ZO?DJCQ_A%zL8m~aUbE@Q$yOn8V14>Mr^6JB7#P$rCL!W1SxE3EbnBHOrFK$ z*-YNT!zzCQ~nE>Qzil{En&rWa@)VeTb=pnL3iGuP}8cQx`CGAyc<7buUx*Gp!!e znlSAmru~L#*D~!orai&5o=oe_v@uNkfN38x?Hi`8V%kqkD`HwH)5@6s%f>XlEz{dE z{ZCB4o#}Tn{V}FL!SrmVzsU60nEnRSKWF+frhmiq?MyFZdJ!{fGNS=AE@Z|v%(#ge zH#4IXGjf>mBr}FHV-zz+Gh-GrK4-=k5@zgV#xIbA%uHcsJ!aNt=5LvK9W!rWW@l!0 zXXaDPe3O~)F*A>uOPKi`GrwnMB{R=6E0}dLvwqF2E17itijA0!mN4B z`sG$I>nmmzGV2(#O8K-kpI*kNm-Fd;eEKk-KEkKZ^XUjaeVI?^@#zvi{fbWy@@W~L zmNPq**-e?n@X72mU9naja znfoJiS2MSSxj&Pbdy08!%xlTK4CYzTKa zc_*1)jrqyUZ_oTInST}Y|Hb_OGXGKLzrg&Lng1&D^O?Vu`P=!dy@b!M z%V&f5Y$czq&H8@x?H{ z7|s{p^F=;itYcAq7G1=m<}7-MMIBkxnMIRWG>b*ES#*#^Wh^RZ@#QT3J&Uhl@zX3$ z3}Ep<7B6D)au$Eb5-h33lG-e}mnHvU$$wdr$C61bnaq-XEIGoGV|;l7U*5`>xAWzz zd^v_M-{s4ld|AYo2l?t6zPgF8Zsw~&eDxAvjUe&W*L?LOU#;dBXa3S!EUnGb+gN%x zOYddrTP)3E=?5&`#M0d?-OI9zS@vs|UCFYiSvG)W16j6+Wy@Li9m~$}bqZhC;Ol$% z`XRo4n6D@B^>hhe&*YoheA9?;8uQH~eAAV09_O1+`DP*CEMj>Q%j>Yb9?NfG`9D~G zH_OMfd#oMfShZWmdQOJrSzQ2+0Z{zzr`2ID%e~0hK z^8FUR-^=&=`Qajd$mEB%{E)*BefS~Kj~~A0hkSlm$I9BQY{bgOtbCM}-B|eqD?euC zOjdr%$_iGV<462>4?jM{j}PbReM%tv8orV`mpLtR;^&w53K&h z7qdEz)lFFaA69o_br)7oWc3VI&tmm4R#&n5BtQM0pZ>&8H}TW+{4|1}UgoD2{Ir&z z3Ru&CHO*L)&YDMA(~UJxux2i67PIC{)?UEc3t5{cVeS91_J6GHz}h_4PGaq3*8afS z0@ki)?IzZ4W$ixJ7Lp(2*CW3^`3=Z#NPa8wTa!PC{K4c8CI2PzN0R?4`EQUvn*1^3 zk0n2s{0|c3e?HHpH2Qe@)wZ*1^G+JUrPQrTM(D-^s=!J8DkO~Jbqyhp+N6nsd*L<&BkU@8SODEO3u 
zxfFaxqF^Bfiz)bug0Cs~mV)mo_>qF2D9EQ^Jq4R6*h;|;3U*VlkAedf9HgLpHS-GVA6`Shs+6U$Aa5>%L{(3f4DceS6kl z%KFP$|7+G?!}>q6z6a|EvHp414`Ka_tbdL5Z?Jw9>$k9e8|!znemCn6vi>j|+OXkD zHeAJq-?QNwHr$k8!_92y&xRM-@DdwFvf&jryv>Gp*pSbLoov|6hJ9=(WWzBwl(O-6 zZ2Suw|H{VO*mwsU-(X`N8$V#b1ydcWpjTv4`%ZaHm_v!CN^(j z^L94xWOEUl53=P_w)}y_mTTE^JzH*M%WZ7AgDnZRyuy~(*fNSOquG+nmiO7RpDoAP za)K?DZ26h3!PX?UUdPtk*?K2i?_%pe+4>+`A7blAY@Nl{*=(K1*3a1bm4vO!*p|$; z25h^KZH?L1lx?lq)`o5WW?L7wbz@r=+j7{}hi(1XwwP@{u-NZESDI z_Ev0f&GxozZ^!oEu>H4e@5A;N*giDD_LtZ`g6(gz{VleyWcwzzZ(;j(w(n$n5!(;4 zqZKIXnNz&g0ekLc&!g;lj6I#$ z(}g`xvgc_DdnU1GE_>#)=X3Ua!Jco}^DTSR*qh1Tw(M=s-b>kg6?=cj-oES|!rm9z zJDj~E**luOZ?ktbd$+Q8J9~GrcMp3HvG)l3e#5>S*!L&){UyP^zq0Qx_WhH6L)iBQ z``%>Vd+d9keIK%KBKtmJ->2-G%f8Rpw~&2{*|&*(=h$C|{l8=XjqLv$`~S}V``O=t z{hipK!~Un(-;@1)*x#T1iNWk2%Kn$wKa%~gvVSuB*RX#Fg((!)qp&H3tth;l!e3K( z4TaZHcq4^3QTSI1@1pQ-3h$%v0Sf<3VFwDIq;LR*!zdh1;V25n%7N(|_+|bd_?!caIPfJ0mT_Pu2Uc@nEeFoMeQlNjH0V4x`CqGDEb#g4^Z?dMI9*WoS>*1MOhT} zqNp!L11NfyqF-XeDSDft4=9>W(M*aKQ1k^wODI}O(Ki&WqG%081r%+dXfs87C^|&Z z35ro%jpEuA*Q2-r#f>OVG@&?y;x-hwqxe#aFQ@qT6#t3hzf=4VivN$|hbaCp#s8zY zBgNSiKS^;9ihEPskK*Sk9zpRKir=O9Ly9L-{0YTVDV{;`XB01__?Hin;;$(Fn&KZR zUQh8JiuX}`gyIs4%P6j(_-76V2a`EigM$}vur3EL;o#jIe1wB9a_}_{zQe(Z9Gt|# zSsa|h!TB8goP&!vxSWG4IJi>6!POjG%fT%i+{?id4j$*=DGr|H5Dq1As5*yIIn)$dTJQaxX_7<;Y_kd1pz_xIFP8N2YURAx9Q*WI0DxaAYM%R&!)6 zN49WeJ4beLWG_bwIdY66RUF09x*SdAXj6_h=V(igw&rMCj$XmhD>-^KNB_XlKXUYD zj^54Dhe#al%F)L;+JmFLIogk-&v5iPjt=AK2#&tO(bqZpCP!y*bUjD+a_j<*UC6Q4 z9J`!jf8^Np9J_^Mw{z@o9Qy~y?%~)&9Q!ZF{>QP79P7fd=OrBbgk$qLwu55_ICg?# zXDPXWk~);6QPPx>=9ILgq%|d%QE~+(S5k5{C4Zpg&y?Ij$-R_3O37oCJV8kgB~MY( zlafA^45DN(B||BBsWT-bDH%;kE+vyFnM=uhO1`9I870dpSwYE4O4d=bk&-QxY^P)w zCB>AKQ*xHl3n;BaX&R+XDQ!+^%XfzLj#h~~D7}}`M=5=b(kCd*q4X(Ads5nm(m|9C zrgSK!FHt&@($SRWQu;Bab10oh=@LqpQu+<0-%hX%VI691jx5 zlQ>?7z4V@$Wdkp5tdIOQEcxgtAtYwWjPc%C4a7O3JRL><^UvnX;QH zyOpv#DEm8Q_fz&5WlvDnm$Lqp4W?`;WiL@SlCoDR8$;Px%5o|DfU=J$n?~6J%9c{L znzA(s$~IHBjk2AT?V)TxWk)G1rR)S{m6V<2L^V#Na-tb0+H>MkPF%%_-*e(xPF&B4 
zKXKxAPW+7%|KP+uocI?fp5(;aoS49gA33p}6MHz3IKqiv;HN21ro0B_7f@c8^7@ot zM0q;pttii={9?*~P5E_{-$MDll>dwJ|5E-x$~#ith4RNK??HKQ%KK6N4CT*JK8*4= zDSwah$t23BQa+dR&nRC=`C`hyqWpWxf28~;%JV5-Px%hYizq)%`8g_3QHzQ?RHRbT zkcu=aT2j%Pindg=r{XdyuBPH9D*jHzzp3~S6`dqhbfw}6DsrfJii-YJ45VTZ6@#f5 zO2w;GjG^KKDyCC0lZpjYd_lz$Dwb054Hc`XSVKht6&tA7OvN554pDJ}N>o;(vNo0V zsBDm+vJsU{sLY_U4VCSvyp+nzsr)^af2Q(ID*unle^c3k%Fa}FqcV%i?o{@rasZXj zQuzz>1u9>r@?9!FqH-3Mv#I=o$|Y1TrSh9!wBJ#=hROmeH&D5m%57Bcr?QmFQ&iQU zsuooZsY;`&DOJs>YDralsxG7I3aYN8>T0TPpz2nt?xX5{sve>0F{(OI)s?CzsOm{o z9}-plsTxStAgW%X>J6&KQZ#Y7bTW zsX9Pa3024W`8j?b$y~Cr5Dd zWlp}v$u~GTmXq&ssy(NE$EiPX>W`edfm46x)XkiFlvCX}^#rGKIQ0~#p6Aq9PUUjy z15SO!sfnDLk>J#4oZ7&t?VLKz=?0u`$>|JEx8Za8Cl}i_?8LeUdZPIFrhm2ApZcnI@cR#+eqJ`6Fj;=1k%i&fLzKzj5Ya&OFMQ z4xH)CnXa6fz?tcsS;(11ocWS7%Q&-~Gb=c=k26O&bBr^`IaAKr6wcP>{=3M*Ku|uXSZ;6J7;%sb}wfOIa|!x!<;?F+2fqMfOB;@SD$ki za;`DwF5+A|=UQx4n=WB7kG3T3dzB%VxaQ;5d|Bv$>INzD`T{-_A=O=T1D(7c#eirBVbN)Ez zPjJ4H^FIq$2-gWW2!9s-BJ@oNLxdNF;lfDaD`A!JlaMd06H!w{nusPMnu$mkaj%F+ zMLZ^=lZY-N#)JL?k^bk~)Z_&LXLsNa`(;`iZ0gB59aN8ZMGv7D=y(q){U2Es>Nbl0FbgABm)o zMbZ}{X|YIJA`wYTMbbAS={u3MP9$vuQZ7=eM9L|Va#mC) zs@E0OQ$_WLqI#OB-c(d?E~;N6s^284-z=)%DyrWhsy`yC|4&qZOjPeAs&^IDpAgmi zis}O-qWVBleYmJTQdECcRDVNMA1$hn5!EM(>N7?4MWXr=QT=OC{aaD}dr^J0sJ>QI zUni;;iR$M?jkco3uSAV&MUCr3jlYN*w}=|IiyD6uHSQHP?iV#46gB=MYP^~dH71K1 z%S4TpqQ)vwV}q!%S=87jYU~s>_J|tCM2%8Wqe|2`C2E`%HHn%vM9m9C&AOsyeNpp| zqGmTybF`?LD{8(kYEBe2XN#J1Ma?fn%_XAdQc?38Q8Tep)LboUt`#-ciJIF*&0V7A zK~b|()cjf0JTGch6SY!At(u}%s;Jda)JhYznu=PjMXgIjt>21TzZ13J616`RwI_($ zpNiUZMeWZ-?S-QDVp01mQTuCAdyPocE)ccXi`tt+?X9Brc2Rq$sFNh>Tq5e+C+a*a z>bxlGye8_55p_Ngbv_bxrinUpM4bhq&Pq{dm8i2p)Y&BJY!!93i#i8Iox`HeDN*OF zs7usM5_PMKy0s*tZXHoKRn%=M>b4ej+lspFMcvCp-77@hD@EO_Mctc3-J3<-TSeX5 zMcsQu-TOq{5u)xGQTJU@_dQWJPt@Hf>XwSSWuk6{s9PoKT`lU}BfI{p-7f0A zlo0ja67|N2dSgYs_e8zjqTW$auSC=<6ZOhP>UARZc9D9gNd2csy;r2(FH#>8ss9zJ z{}ZWQL~4#m?IThLiPV=x>Z>C4O_BPxNPSnN=84qtB6WgDogz{bpNiClBK2#Lx<;ht zi`0!Gb&E*dE>ic1)cqp0NTePWsg)x2yr^GC)UPM%UnuG~7WFR@^;?SitwsH|qW%@4 
z{vSmBzl!>|iu!*O_5UI2-y`ZjAnN~H)PGoXt=~b^&l2@}iTclr`Y(w3FNyjiMg3Pr z{WnGZw?+MTMg0#&{i&k<0#SdVsQ;y?zf9C$F6#dv>aP;@*NFO?Mg2XZ{vlESh-eV+ zd^seVC(=ZNrlLV}(V(Sh@GH^a3en(7(cmi4;Cj*EM$w?RXfRkb7$O=B6Agxo2BSoS z(W1d>(O|1+uw69RB^vAz4GxJ0M?}N6qTz2v!>dKZKZu6ciiUp?4gV?{4it%o!$rf9 zqTzd@;rpWDhoa#`(eM+|aH?oHLp1zUG@L6ME*A|~h=wag!_}hUTG6mTG~6H>ZW0Z* ziiQ>9!envbCE~(c#D%@Yg>Q=sXNwEVM5EfGQ6tf)v1oLWL^MhljV=|9ekB^+AR7Hy zG`d+dx>Yo~Lp1ukX!K9f=w8w2e$l9lXw*$KdO|eH5sjV_je3ekeMF;vqR}&=(P+_V zl4vwXG}2%R_mS{RhG@UP+elD6Wl8B~Xil(bY(>0=L zfoQrxG~FzkZWB#+il%$SMK#1lzY-VSBQEMGF3J@bEfyE;6Biv87nO@@v~pYSHWuqS>ECvww+Z{}Iie6wL-CM6+DcY>H?$O*H#bG+QQ`Ef>vJ zh-NEAv(=*6TG4EsXtq%_+aj7B5zR_Ovog`FLNxnXG&?PtofFMNG*1@IYl!BVqWNz{ z^IJvpheY$oMf2xH^S4CvPet>@x1#xW(Y#PJFBZ)Yisoga`FW9EU8L6$>1iUpsYt&_ zq-Toswj#Z~NWV;^UoFyqFVe3S>DP<&J4E{5MEcz#{XUWYfJpz3NPk46KPJ*UiS(`_ z{c(}rLnP9Bi}XGseUL~WEYe>P>933QQ6l|ak^Y`Ye_y13DAFg3^l2h}hDiTZr2i<= z_lg$PM2l3>qJe19OtferT4ab8ZA6Q9qQ!4Si>pM7--{O4iWb+47XJ_}`bb2Jp`yim zqQzv?K;hAX)y<;S!=lyWqE(h?)m^lDTD0mV zTJ;sJhKp8ji&pQ5R^vpgJke^rXf;8!O3Vi&kqytL>uI zPSI+QXtiIoDiW;@iB=~>t8$SMA|pv;R2LbwL`EHvkt#CMMMg`J(OP7*6&dYC#$_Vo z8jr7M8;zxqm#(U5gC0&#t@P5qR1F2GF}xK zZ-|VyMaH`#<2{k_k;s@OGG>d6c_L$h$oN8JED;$iMaC+Tv07yOBr>*%j2$9lzeHpd ziHt)c;0m2Q9`snBwAOA)+a^lb0RZD zX0phvB{J)X%v6z?DKdX0GOrbx*NMzOi_Duv=B*<0ZzA&_BJ&=R`KZY3CNg`A%)TOX zpvW8~G6##yVIp&c$b3a)ejqX@i_FA)k-0!*elIeA6q!GX%zTl#USw_(nOjBX4w1QA zWF8lpo(Ppt|^Oa~*B-)gVHkG2yNzvwvXmeh)ttQ&m5N#WYwv9#G7NTv2Xxm1# zZ714ZA=+Lk+Fm8v{$8~0AleQPZAXc=<3!tB(e?w;_9M}DmS{U$v|TFNek0m`ClPIb z5N%h9wrfP&0?~GZXuDsuEfQ@HiMB^Y+fvc?glJnS+MW~_R~Hwz78hS3F8+(S_z`jO zKymR);^GP7;?Kp!KZ=XDii?kmcC|#iI-*^wXxC7*OB3yyh<3m9f4locyZc4Ehef+b zMY|57U1!m*yJ**6v>Pbe4HE5M67435cGE<=m7?7`(Qcz?w?(wuE7}!`cEzGynP_)j zv=7lfS+uVq+SeED8;bUKiuU)3_K%D9-9`Jv5Yhe>(SCtwzgYZ#3+MLoha3ms=UH>a$RT=5Eg~0wPGifOmZF8!YPB0B7DX#1BraaP zd20?)8k$yb%@)lm=DB%ppTFRl{DhNVadHADe`Bn|I1S@WjI%M;V$5M&h;cE-r5K4Z zk8usgjTnz%JcqFZV<*NgjF&O?V7!X)I>tK~AL1-N!dS#u!dS*Qgz*i=cNj-7j$*7} z9K$${@f+q@m}@aF!kl1kz`PFgdd!oTk>u@bAX 
z)?uy3n#bCRwF&E%8P;uBw_|O=dH`zy>p`roSPx@8iuEMc(^%WFp2ymS^$ON&SbMSd zVeQ9y1M5w!x3LaleT=n)^(EF1;{cBLaeRVf5yujaWgLfae2rrT$1xlyaQu$r4;+7CpMkvw`yA}^u+PU? zUx0l%_7&KHy$*Xl_B{3m?2XvhV&8zh1^W){yRh%UejNJ=?5D7|VQW52p(_-TL2dp4F>PdG5zk{vW3g{R7c#SrY&N diff --git a/main.cpp b/main.cpp index bafb5383c1..9f4d612058 100644 --- a/main.cpp +++ b/main.cpp @@ -45,10 +45,10 @@ #include "head.h" #include "hand.h" #include "particle.h" - #include "texture.h" - #include "cloud.h" +#include "agent.h" + //TGAImg Img; @@ -100,7 +100,6 @@ Cloud cloud(300000, // Particles false // Wrap ); - #define RENDER_FRAME_MSECS 10 #define SLEEP 0 @@ -201,6 +200,7 @@ void Timer(int extra) // Send a message to the spaceserver telling it we are ALIVE notify_spaceserver(UDP_socket, location[0], location[1], location[2]); + } void display_stats(void) @@ -650,8 +650,9 @@ void read_network() ping_msecs = (float)diffclock(ping_start, check); } else if (incoming_packet[0] == 'S') { // Message from Spaceserver - std::cout << "Spaceserver: "; - outstring(incoming_packet, bytes_recvd); + //std::cout << "Spaceserver: "; + //outstring(incoming_packet, bytes_recvd); + update_agents(&incoming_packet[1], bytes_recvd - 1); } } } diff --git a/network.cpp b/network.cpp index def6845057..e386ac913d 100644 --- a/network.cpp +++ b/network.cpp @@ -11,13 +11,6 @@ #include "network.h" -const int UDP_PORT = 30001; -const char DESTINATION_IP[] = "127.0.0.1"; - -// Location of the spaceserver to talk to -const char SPACESERVER_IP[] = "127.0.0.1"; -const int SPACESERVER_PORT = 40000; - // Implementation of optional delay behavior using a ring buffer const int MAX_DELAY_PACKETS = 300; char delay_buffer[MAX_PACKET_SIZE*MAX_DELAY_PACKETS]; @@ -97,7 +90,7 @@ timeval network_send_ping(int handle) { int notify_spaceserver(int handle, float x, float y, float z) { char data[100]; sprintf(data, "%f,%f,%f", x, y, z); - std::cout << "sending: " << data << "\n"; + //std::cout << "sending: " << data << "\n"; int packet_size = strlen(data); int 
sent_bytes = sendto( handle, (const char*)data, packet_size, 0, (sockaddr*)&spaceserver_address, sizeof(sockaddr_in) ); diff --git a/network.h b/network.h index 751b8f0f87..e06f416b9e 100644 --- a/network.h +++ b/network.h @@ -16,7 +16,14 @@ #include #include "util.h" +// Port to use for communicating UDP with other nearby agents const int MAX_PACKET_SIZE = 1500; +const int UDP_PORT = 30001; +const char DESTINATION_IP[] = "127.0.0.1"; + +// Address and port of spaceserver process to advertise other agents +const char SPACESERVER_IP[] = "127.0.0.1"; +const int SPACESERVER_PORT = 40000; int network_init(); int network_send(int handle, char * packet_data, int packet_size); From 4226886042a9c4b5e5cddfadd0733a5ca5d96f96 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Wed, 21 Nov 2012 12:08:30 -0800 Subject: [PATCH 015/136] Added vector renderer util, broadcast packets now reaching agents --- agent.cpp | 11 ++++- agent.h | 1 + interface.xcodeproj/project.pbxproj | 1 + .../UserInterfaceState.xcuserstate | Bin 103122 -> 103502 bytes main.cpp | 26 ++++++++++-- util.cpp | 38 ++++++++++++++++++ util.h | 1 + 7 files changed, 73 insertions(+), 5 deletions(-) diff --git a/agent.cpp b/agent.cpp index f0f3fe9bc9..63aef1c6b4 100644 --- a/agent.cpp +++ b/agent.cpp @@ -18,7 +18,9 @@ struct AgentList { } agents[MAX_AGENTS]; int num_agents = 0; -// Process an incoming packet that lists the other agents in the area +// +// Process an incoming spaceserver packet telling you about other nearby agents +// void update_agents(char * data, int length) { std::string packet(data, length); //std::string packet("127.0.0.1,"); @@ -39,6 +41,9 @@ void update_agents(char * data, int length) { } } +// +// Look for an agent by it's IP number, add if it does not exist in local list +// int add_agent(std::string * IP) { in_addr_t addr = inet_addr(IP->c_str()); //std::cout << "Checking for " << IP->c_str() << " "; @@ -55,12 +60,14 @@ int add_agent(std::string * IP) { num_agents++; return 1; } else { 
- std::cout << "Max agents reached!\n"; + std::cout << "Max agents reached fail!\n"; return 0; } } +// // Broadcast data to all the other agents you are aware of, returns 1 for success +// int broadcast(int handle, char * data, int length) { sockaddr_in dest_address; dest_address.sin_family = AF_INET; diff --git a/agent.h b/agent.h index 896b76923f..ec3f42028b 100644 --- a/agent.h +++ b/agent.h @@ -19,5 +19,6 @@ void update_agents(char * data, int length); int add_agent(std::string * IP); +int broadcast(int handle, char * data, int length); #endif diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 18a6cffacf..8930389ea0 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -235,6 +235,7 @@ isa = PBXProject; attributes = { LastUpgradeCheck = 0430; + ORGANIZATIONNAME = "Rosedale Lab"; }; buildConfigurationList = 1DEB923508733DC60010E9CD /* Build configuration list for PBXProject "interface" */; compatibilityVersion = "Xcode 3.2"; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 13077bb321daeb9cbaa5b117160f46a4a44ffeb5..99d39951b83814b04efb34739c4a0c3e86806a6d 100644 GIT binary patch delta 26756 zcmaIb2bfe<(g*zCh}|j*454oi!oUz5P)S41L4tyaCr~abr@P-<7CpE!`goA9 zW6E8#ieI1f)W+tWuTEsdYQ^ft8pfK$n#Wqj+Qiz$?uhk`^^5h74Tuel-4nYvHYheY zHY7GYHZ}H8Y+CH$*!0*Vu}5PwVvogU#-57JjV+EXiLHsPjqQr$IM(oYl z-q>5Q)3GzLv$1or^Rf417h)Gx$NmT;f~4T`;L6~t;OZbb zC>&G>DhAgFHv~5Zm4eDam7r=+EyxIJ26cl5LF1rV&?0CT+#a+q`c801kQHA)NWAC0& zr%t7`x;5)=Ui8_D$hCutoERl*LiK9ZYu2rlmQkyEwMktkRIgU6TAkTFzk0GrR`w@8 zy{X%T8a3)=R7%Ups8@UQ6<zMi{qxM%u9Q}{cAZIGCe*H8bMr4h zwn&O*-8<}+a>Hh~_<333d#7$MvcKDejEsyrmC`cmR;xDqm!ET^S-nqua_!*Rxxd_T z!=T9FBdziPwQA>4u32MJ*9jRlGiv1#oOHxh^1E74q-&8LvwQxOGP~lhy^3WG{70Jw zd;jEAyI#hmE)#0hsJnULuaQ{f-cRn_JRo;va?*9t>h-JDNo&9DNODs3%ge{Ej$IQg 
z5Jnb-ktJbdSr}OnMplK9HDP337}*d;HinT+x!1>%%FNC=+c@{(SW-$-&Dpb#Rmy#& za8lvi3$dh>*{@xwlA8#UBDs@dNd)-i?v8bhWrmThVdRA{vMr3f7)G{-k(a{Ajxh4_N|j^XV?9(>D3+;w ztWS7({xI@N7}*(KoQw|ADXm6MnE<)1m(x9>mtiw&jJsA@+Q1V{B7wb8Jg&YwU$Eawv=( z4kPb{kt1Q`Xc##bMvjM(6Dwmc#9U+NJl!^oFClFQ{@NKFcI52Yr3+ab8jRY~oFeBIi0 zxU=Vw{(YMd9W`WR82REo_qaER1ktYTIbH{t-sL4ksrgzPhsTeF!IYPm4nhjT2Myipj=R1y{=e{l)^gD3-w6t;2TlJY2&xCo z^Dh31uvO3|r{R*|=HQl~br{VTMlTPektM;cSL9_Uj7D=Clu7DYwL{P)r{U6|V{li{ zDd-$VuMDGCh0*+B^y;O--6{u}LANk^&7UC^3Zu!n`^zNVQ>}Nd8RpidYruqfyk zMho^xF4Ve3%T7H9jp{q1*@%wA`}X~d$34Nlxi_RIl}lU`3=Qu4OK*4>E&La1R4_WH z;dB2*e_RmeG+Ysk4<-Z?gGs^UAQ9w*(V}6rSQw3k(IAY*!)QtvExsbSU*%v*@Sw^n z2h+mnwSTfL5k|}B4f2w4WB1lATIOC+F6roHb!z3FE0>g#m>oQoN9D;dT5?hFbQn$j z{|jMmFh8f^qF`PaEww0E5JpSiJ#fg#zQgtY9Y;zdB?%$#ycp-T4FCN>&X!>8G zmxJATd0ZT<30@6$h0(MyS}u%M2%{C325bI&zVYWwxj)~pPjqQNd}yChy+@9?`_8^2 z{^U90FJo^9@8mShGj-rk^bL8#Ji1}DT&ID9d-needrfd8IQAFa<6*SYUuI4Pr~h|O z&jlCrO7eX0eu>~h7_AaUtA^2POH>Xn1t0#Isf;jMBlofEk|vk>Jow_TTKRhzt^SwY zuY+&$D(bpBz7Kx<&wd~L6h>?P&&Cz}n$uuO@LTZD;P)_EJB-!~qYeM9eoZ_ne)(V9 zc-}4^kHn*4v`!eUoBLdP(sd={SH}zfA4H*eau}^2MjPZ_OiwCPA{I~iKY-%#Yr|-x zFxogbwL((*U8Um{a~iIUr^U;}%f`#a%g3*ar^hRV(WYUvSs1-3j5ZIWEy8HaFxo1N z-n>%f_zlWSk5`UYiC2wR`*SUB$w|)13ZsL=Xd;Z}gwbiaxfPOD6mAf2l+$ogykQt^ zy(r!|jJCPI|n37;T^D>E7HoDkk;G?v&GGQM_{)z2o1?LA-0cS5Ctf@yvL)cvif7 zyhl7c-ZPBe8AdyV(T-vCt}xmujCKyAT~@?<$NR+l#`^`U;sXkt4x@L6(XL^%Zx|gI zM$3lLd%}E&b05AwX>H*V@saUSu8NP2kBN_skBg7bZFNJ^UHN;3(QaWhGmQ4hU3NoK z!`g|Q9!ugm@i2aW7|jZ!J;G@Ad#;L4i9Z;u>e{+b-ytIh_8ZuDco^*-M$7)WrN*bl zAI>dvV^W!X(+$lVly3EJjm3Y$(*EZ%{`jAlJ^#CzKX;Wc^oc)N@QoZ{w0D^Ay?+_a zjnA3=M~NV^IR1>Gd84cTH9s#tKlj5MlPV`I$SqPSsZQj%__Cb3vzvVybc!#JuTVL@ zDvb6Eqy7KP=zwy~UdS30Uvu+_|JUieQd+f1J(5ce?>nSV-{Cz*_w7A$=CUV7#rNkG*4trpWbQ+ilPX<(DE?mF#NjYHI(KK~q;hRf z#83UF4dbW7=-B^h!}z(J28-k8G_y_Ti{!jaS9RDPYjt`>~aw}I! 
zDpTT%_*ef2@OAu~FghuWPR<=zC8>1chxm{GYio)B694Tl4*v|J;s0<*k<(ytN>a*Y zVf6km`rv;!q(oA#{NFB8u1d)tMjr^HQ|`{YarYTGa_I1KO~*g?zYl>ag;I*-)D8ao z%$QO%rC1o98b%+=O|6=gS|}x@xT{jGO(~&IN@_}}+}2f-+NM36S58lc(OJh+(o)K# zluaq;s+966*Cn+}DV214N`>65Rg-Q{Ob?^8!szTA$KwC#kCYoyZp`Zs>0$KoF#5>9 zx+5p+Ujcrcl9AJ6NlNvU8Ywly=%ZnDMi_nU-}S!g-)-@)hWX#+pVH`0>CFEs|BnA( z_5Z&J>yy+j@0C&^<>nlDG0hI6PyCxoo17ktQc8r;hyU!2l-p9;^+>Le`%dws;<;Z} zOG-|72VTBdJ2O!4w!lfgB1vM1h$U zm`j2A6j(%oB@|djfz=dPOMwj(c#{Hur@${1_${|o^`sJs(iAL1!EzM5j)D~^cs&K1 zQt&1UwxD1u3bv+TTMFJz!8<8<7X`aeFq4AaDL9IPPf&0z1-Dc15QXwlC`O@L6lzbQ zdngoA=za>#q|hu9h2~OdK7|%i=s5~4rqCJ+t)tNM6xvLo7bx^K$;C;oMRE_42a!C4 zjU;a&c{j=LkbI2f z6C|G^`3%V)lKdsfUsJd#g3Re#kNyy2gP2Y*sB!VO|jQ0_8qZO z#BL)tk=R;dZxK62>;$pXBw}ZYy-(~SvG0lfPJkdna0S6t1OY(`K`KFMf-(f@1QiKx zAgD`lD?wX=y9l}y+)FTwU=l%sU<$!hf@uUZ2xbz@k`T-#SVFLgU^~HXg0~4S5PU*B zAMtC57bISocv0fziB}PJA=*t;Am@zLWSa;;#|kM|?l= zcM`9J_+1q5Oz}Y!A42hA6dz9UkrW?I@v#&i zPw|NqpG@%_ia$*8M<~98;%h0sp5kv&{4m9jQ2Z;3|3vX$xV8+}-oUk$xVAOd-pREc zxppXtYsYf!c&>esYj<<)>y)^h5(Owxh!X86(U}r=Q(`0~CQ%|miTf!rg%UF;F_RLj zD6xhT>nQO8C3a9^4<$aN#NQ}+H6_bXvI-@uQ8I&))g_c{Ldj;7Y(vSmlx#=I_LRJn zl9`muqGS(B_M~KQO3tR_PD=ir)DooDBK0OxTabD)sjW$EOKLk(dy#rCse?%!PU=Wf zN0a&hsSlDmJwfWDq&`OKY*L>hHJ8+7q`pY%Zc-1DdWh6xq@E!46shM)y+G;*r2dW6 ze~|hEsXvqYE2XZXR6$B5Q>qB1iczXRrG`;z5~UK9nv(Z9l~U6vHG@(!DK(2y&roU+ zrFKy2ElRyjsr{5XK&exdIzy?CDfKC(KBv?dl=_lVUsEd2`9CT32c?rJosZHHO4p)v zHl=4$dMBm7BrQLQv^Z(SNh?WODbmW2R*tkfq%|e&Cem&utu<-4lGcT^uB7!KttV-{ zNgF`gJ){jHZ9Hi+NPC*JC8RAQZ8d3YNn20aX41BjwvDu164K6+_7Q2HkoGxgUy$}s z(*B@K5@qsHCPJCpDAS2Dy(!a|GJ_~Hgfhb@Gn_KxC^LaFlPHs*%v8#(q0C;&yhEAe zlsQS6Gn6?;na?TnMIP{H%KS>1e^TZT$|g}ZA7vwyEkfC1lnp4GLfLC6TavP+DBFUv z*_3^Vvg;`O4rRZk?5~vlC*=xKE}3#gC|8Vf0p(ICcP-^gQmz!`5)~+SJ>^ z#gtz{`R6IWiSnB%zlHL-Q~o67FH!y@%6~%n&nW*l%74jq zrMRv&*Nx@66%Jo$>6einA^i%{^OJrJ>1m{2Px_6dS0lYT=`~4jCLz5!>9>&H zhV-_i-$8l@((fX@H|e8EPmum7>5q{EO7o>klg+^3pMTHItD%?edZdB+_g={MHqQXEb+)IVQR2WKyF;tjOg{4&3 zMTLD-*iVInR5(P1b5wYr3SUqm&%xJJ_?8O)pu!JS_=$=UDqcawtEhN26$?-?qYxDf 
zQ?WJ`@1){5Dn3iaH>mg#6~Cn7*Hrw5ioa0tx7o;dX`GNsq{9L4p8YsD*c^GUs3rgDi@)0F)G)ma&s!Tq;hX6 z52ErADo>~KY$`uR<&9K+iOMfi`9muIoyuQPC7CM4NmMC8m3maUi7G9qGL$M~sWP4_ zOR2JsDjTTsEmeM{%0H=kJyokywI)>uQgt|0M^ZJHs?Sk%F;(BD>Jh3Qqw3F8yNqg= zQ>_No8d9w>)%r@PHiT-!s5YHyv#ItJ)wWRW6{@{TwewW_gleCWaRnL4WE3Hz5gDz> zxP^>CWQ-zX3>kCDSVG1!G7ge)f{asS{6NMZR8OLMDXLeX`t=E_x1o9ms^3NRK~x__ z^)XbRP4)RyUr6;gss1k24^#bTYFtK*%c+q@jq9m#BQ#sa>4f)v4Wp+Ks5)kJ>}2eIKU5w^H|lh!&i&MRL_(bz)LBoRZPeLLog>saOP%x7`Gvaqs2ic~_0+9S z-I~;GPu;tzn@Qb?)P0b;4^ejmbzh|JOVm9_-H)mJDfOJ3X! zZyfa|P;VafmQrsy^`eUfSfcnd+zmod< zsehFE$Ep7Z4X&g?ej3!IK_eP8p+Q#~^rk_gFAW~0!BaHIrNJ&5?4!Yc8vH-oxk8{ft+;WmzE|R$A60NVNb#+?Tr1kx@euUODXuXft@6q}wZ3@sPpiK&G zZlz5}+H|7LEZWSa&3xLtPn%C^^EtPc<<=XywKBI3MU02g}EnPpQ z>-Tj1k*+_{^=C4pWL`;TQ8Hs>#>p&BW(hJ&k$F9tmB_40W_2=aky$rE<^VFMkU5LY z*2mS^LM%>(JepS66tiSO1BKU)udY;y4^yzTj_Qi-P+Tw1Kpmb+e*4^rQ0^T z?WEgoy1h=fH|h2c-44<12;Gj;?L1le$SOeA^<-5it0q}>$f{4)&1AKvaaJd?x{%eC ztZrm=Co7w*UStg=YdBe>$Qnb|II<>?HHoaLWKGNcp>9uB zn$Y7GdbFX(!}NH9M2{!wv4I{h(&Ht1?4ZY6^mv;d`{{9j9;fJWh8`c&<5PNkPLD6> z@g+UJp~r9Z_=D`r$c~VGCD~V#-GJ=QWcMMvAK3%RzL)GFWZy^jcnR4P$qvb$LiR&s zKS}n}WX~ge0ol)zy_D?bWUnH76WOnly_@Ve$$pFMx5<8&?89UqCHoxNKhyIvdR|V? 
zE9jY@o+ap6ik@loEJx40?^-=a(Q`6AAEDD`RJ8QuOjp+O|LTaDo3yD=v9GU*VF4pdex;@19~;4SF9<$ZlYHU zdbOh0?ew}MH@$6A>BIzjO{P~!uPOAJL9dzgT0yVp>9vVo2k3Q-UMJ}F9ld^|*YEVc zj^0)1U5(zi(Yq79yU=?Cy(iLpGQD4>_Z#%yOP?$0Q2oK2GD-BwqR&|Rg!FlU zK2zv3i$1gI^CW$qqR(RbETzu|`fQ@lR{Fe1pB?nsNuPc6IY6Ix>GK|aj?w2NeXpc% z4f^(??=1T6rtdlWeoo&n==+s~zTeXKd;0!Fzia3hqhFkUCFxh1er4%bm3|rYt4+Up z^lM1JX7p=LzgG0?NWcE{8%DoL^h?lh3jH3U-*o!Tq~9$1JxRX>^xICq*XXw=54e|p zZ`1D#{m#?x0{t%0?_>Jcp?_2Q-$wu2>EDU|UFe@l|L*kfOaB4%znA_)=s$}7bLsyA z{dduSAN}{!{~-Mj)BimEFVO$*^#7XvcYjC!AL#!x{ePqX9}KvL0Ra zgMMLfJ_bh^oXX&I23KV8O$@%3!M8DZ1cN6scrt_MGk6(;S1|Z>1|MMXK?Z-$;O`jx zJws9%lFpEd3~9}fJ4p=b$dKU-nZS@q3|YaD=NYnzA$dx_GZaHBF|-y#>o9Z>Lq{=m z3`6HKbO}S3G4vgV9%txDhW^H|2*a*mSZ#(iVOTSU4Pe-P3>(3)84`v)&9FHP+s3e6 z410}X7a8_B!@l6Y{M=WR`(oVJl>1t9->uv?l>5eV-+1m@z3{whIe9kmjuIyGJGt<$20tChCj>jMGW7^@b?≷OWIBEpC( z7*UQ9l^9Wl5qB}7J0r3gF@X_N7%`O*OBu0_5gQos4kL~;;v^$}WF$sj#>g~ACa!1X zjf`x|$h#QXnUVJ~ay%m^GIA*+*D-PfBM&q3G$YS4@;63B78TAySav8OqQLiy-52HS2)K`r9hQ#Rnj4sOP7^53A`esJAW^_+R-^1uZjDCR8 zk23l(Mz3V_Mn-RD^chBf$mow5{Rd;NWK4d>+{l=kjH%6-u8ir;n7)je%9xpqnZ=kj zjM*Y#%nOWpk1=N$bB?h|jLpy3YZzORu^Ei5!Pt(B&0=g1#y-T@#~J$sW4AJPCu4Up z_5x!+W9;7;SDbNW8CRZhw=nJw#&uxa2*yoh++@bhOE7LJ{oX5wW`yqt+um{^C2^_bX`iT5yZ5EExHaV`_*Gx0Sh z?q}jVO#F(8KQZwaCe>roO-yRRqz9PvD3cyz(u+*m&7{|v^bv_kUoz=yCKq9H2_~m9 zxh|8NF}XRDvzXkE$pe@?lgV?KJeSEkn7oI{Z!-ByCjZFfpGlM?aUFR-Gbh@R=s@DG zyh0<9AQ6&SMq)jQ=OrZmLE@hz{vf9fIUUHki=3y)d6t|-Qcz?4Ev>CBXDru1UUb4*#qlr>EG8&m$llplDo1`jsm!Nxo|kp~~- z!H0No9}m9AgGZTq4O3%GjWacIJ5#$bwJTF+Gj%>w7c%ucQ$JzqXFODnhbr+<6&@PN zLz8$Y!9%-vXde&lXIhkLg_u^DX+4-WfNA$I?RlncXW9-PPU7MGJbVogcje*UBp&X| z!;5%$H4m@l;g5LuOCJ83>E)PSiRo3CK9K3dnLd)~+nBzK>8~;U7aqySBM~0Cg-7n- zkq$gEn@8sJ$U+`D&m*7k$Y(rSnnx@0=nXtNLc*gHd2}+5zR9ES^5|h^RAELPX4GRw z4l^ER#v{x)!i=-bIL~A0JXV#*GI*>%j}7Co;XL*_j~(E#gUpOEvlKJan0XH~M=)~~ zGj}p`FEjTgc)S3Q2Rxp_^I<@y$H`GLP?MRuZ%FGwT{=bz@dvX7y**24=m; zte2Se2Txqd6Zv@}nytZ)+^OVFBX=*khsb@8Ifa>X zEptjTrwem>GN(6lmM~{6bJjEGbLM=K;OO~@_14}lt zZy_%&JSXzyx)mhq(rJY%NH%mj7PG{+(EM3Ra7f399k)_93`aVl9 
zvg~S>6=PYzvb$N$Y8VD&LppJ(+2)?CS&!mKIE znp&)B%$laG$!5(!*4)dQr&zO)HP5l;1=hUEn%%7Vnl(SO=2zCHvNoNy6JC3!BS-Xa{>sb2+YY(#a5NkhS?bodRmUY*#F2=ez>zc6c7S^?4U2oP6 zV%-qd&1Bsi*3D(zPS)*Z-9FZR&bsed_dV+iu|7q@`fFKVoApgt-;DKrSwDpJ!&v_) z>z`tMF6&=p{chI3&icQx{vWLWfeppkP?inl+0cRwx3S@NHcVo}R5naw!&)|MWy3Z$ zoM6KRHhjQ_KN38DCC}&Q`35}Sg6CWD{2-nm#q(o$ekIRuiIDw5%v+-FrE@IVBQ|BR zsUMpLu;~dl&12I7Hod{7gKRp)rtjJGJDagNoy}F*oWbS}Z0^S9?rdJb=H+Z&$>vYk z{56}uWlKr6T*sCQY-vqm%bjfL$d=J;Nw6hk%Y3#hW6KJ*>}Jc`Y&pP|@7VGiTYhJ2 zX|`5m>kVwZldYL-&0=c~TOVfYBWzvI)@^Lv&eqdxy~Ng!c;PBuD8dWHB)rgt7kctS zZ(dl>3)^^MJ1_jgwtQ@huH$M)1-O zUfRP;Z?fZZb`)SoA$Hu(jxOxz%8p!iJjag3?6|;=&)D%dUari`wRyQNFAwA8alAZ% zmlK@3gD%IvJl&U@H7 zf}Nw-`8qofu=60V7UR`aUMM-mufglJc>M`ppU3M9c>P0O|2waL#hyCsY093P*fX6y zv)S_$d-k*ED0`0cMg`ue#v9dnV;pbX&l^*C;~;OG;EhwfS(Z0%?_5-H1=h)Zy@{bW#4n`ORQqw8uoq1zTepQJ8$34+g*6OD{nu++w*vP0dIfA z+h6kb*X*yv{#xv>!~Tive~|qTvHt}7FR=du4m9DwEgWdWfh8PR%YpSA_=W?&aNsu* z@3iEdcD&P`cV_d>eBN2eI~RH9bKd!agEw-pCI@SCa1;k8b1;X4FLUq>4({dMD|k1V zcZ=|DJKpWgyLa>M9Nt~TyGwZYBi{XzcfaOPeGWC(!=aWOn#G~H9GcIea~%4ZL!WZE zJclcDxGIO+bNFr!XL7hZhkJ7P9u5!U@H7rT#o=5I&*AVh9A3`hmAN|yB<(1W%X@Qq zZvpQ;mpgS}(vHMTjy%DUr#Lc)Bg;6lf+MRrvX&!TIkJr--*V&+jwW&Ra*jqhT8N{C zIog?{eK^{WqXRj5FGojmbTmizar8JxPjd7ON6&HeV~&2xu{09LDs!wV$EtIzCdV3a ztO>^^aBLdKrgLlt$7XVD4#(zl>^+XX&#{Xf`;cQFbL=aQeZ%nz9Iwgo+8nRP@dg}k z!SPlc&*AtCj?d)y6C8h1!tn(hf0pByIQ})qzvcM%9RHCMO*qk(6YV&02PZmkVjd?} za$+?n)^TD3Cw}DQ<(!Oi@+wYV&B+d&%;sb-PWI(we@<@TYN(Ksi~Zr#;Hd*HG@-6b7~H!PIBraPJP0u&pGu4r@rUZ zkDRW;=?0u`#ObD-zKPSfa{4w-&*pUEIZiL;^fFGb;PmsH-o)w8IQ;{sf8zA7oc>%Ty@IpFI2&-bIA=?c zI9r~x>72cnvtv0sp0krUo8as;&Q9m-Zq6R$>>f>kj8~|TzH8K zFLU8lF6`#Q+gv!n#ml)^n2SZZ7;rI#i)FZ2j*ItlaV!@TUow(GSOMSUCfJ^sqX#|%>ap?^%y~m}aTspy}Q(XFhOCR!K zIv>{H!&-b;mk;anVRJrg$%lFJZ}{*#KKy|Xf8zgX;XI?GAkgmryOS~{iD{EUCJD(* zresnk6J@Q)qA1FW*ac-Rh_AZ>DwYL799&#>aj~H&%34rFuw%o5Wi8m(%A%mIt|Zfv zOfr+1Oi6i<|M!2t{LVe+KIc5&o^zfLw@1*E2R%na&q>fTivT^dp=S>C%!Quwpl1X0 zJOn*kpl2)eY=fR>p=T%b3`4J=HxGIXq1Obx<t-UgAg2oU;=`p5S)OJ0YZTS2$>*M 
z4k0UqY9Lexp#})KAap2%j)l{$30imT3x)wsqAha4n zw?gP%2yKPXHV8cnq30p=5`VuYJ_*97L-;%h zFM#ls5MB!56%bwp;nfgc1L3t0z8%6FAiNR6n;^Uy!doHy41{+<_&o^kf$--L{tCi- z2@w7k!UrIH5W+zSMF+?Uo#0HTTh`1r*g-8cPx*&2SM2>;TG>9Ar zkrNc+95Uq#kWQf`!>V&8VqJD@T z2GPLb5IqW_Qz3dHMCU+sK145o=w%SS9HL7gdKE;kf#?mt4ng$K5d8~8{|3=JA$mVV zABX4;h&~U|mmvBIL|=#Kn-Ki~q8~%_Gl+fx(XSyo0D-FVZRmR!`u0HIN6_~v z^zDVdZ=vr1^!*5ZKSSRr0s0-#?}z?*(7yosuY~^Vp?@v({|)-@g#Nps|6b_7ANn7F z{%z2|1NxtW{%4{8dFcNP`iEh_00UkaI1~n^!NAEda1IR2g@KD)wA z{0RoGhk+Yl;8*`=76h7a{fr z#P&e!Yl!WI*pCqV8Da+^7KB&?VhM<)AeMpHD8wdU&;)~3FgO_o+hMR11`mh9qhN3< z4E`1ZgU7?*DKIz(2G4-Ovte*93|<3+_ru_J82kbTzk|Ub48~z_0)_-b78ojlp)we% zgrRB}nhZmB7;?gp2Zq{U=tLM=0z)fd=usGY3Wi>Vp?3)|^aTuk1w#j5=tmg(8HNtR zP!NU&VJHDZDHzJY&?v+UAbtqMYareNaTmmW5buWg6o?-I@uML=1L89wJ`3WrA$}Uf z=Rte{#1}*SdWf$KK>TKi{}tl5K>SvS-vROaApQ@CKLqhdApRJ{pMm&m5PuiqUqJjT zh#!FXj}ZSE;s+rfg!mxD6A({9JOlAjNEATg5J=QOq6HEzNcbQT=!V1;NE`u)qaiT^ z5;Gw&3lg&-aT+A%L1FiKij) z8YDh|#7B_$3KDxE@hv0{Kp^oWB!ZBLK%yTKgOEr-Vibl=FkBA9jWFBusf4#Pee z?uOwhFnlZwPlw?dFgz26XTk8LFuWdy?}y=cVE7Xl{uYLNU^ocFgD{+c;S>ReGcY^~ z$pT24AXx;-Lm*iW$vQ~7AnAkTF_1hKk~1MW3zD-Tc^V|ogyeikUI@trkh~OM}@O4yh%O`V*wqKmy%dMc!U3+dw_eIlgiKzbgeFM;$`kiHtyDmaiMG8-YY2{M}@6L=6Zk3(iBWL}5NhmiRgGG9aH-;ntZGT%eyKadGQCJLDW$P7Vd z7&2p!wLsPi*=EQ(AlnXEA7r~BI|Z^wKz2H0XFzr)WM@HkHe}C%Kz1QymqPX$$ld_i z0A&9R*}p*cZ;-tcvUfxFUdY}L*#{uI4YJQc_BF_U0NIZq`xRvOLiSt89)RqRkPSjM z0@;4Z4nj5o*-^-uA!mhLGXZiA$hAYx2f1#@O@Z7Iked#<8IYR^xml2#4Y_k5cOm2! 
zL+%R5ErZ+&$gP6hYRIjD+`hVeIH`~w*O0>)!7o`Q)gm}rBEBVghv zn3xI^zuy0e<6+`Nm{UdK88L=bh-5-4LOKL# zGLjw1iR3}@BE5|C7Sg9k|3V5PMUeWD29Xj-DWnY2DAEKn7?7a=8B76WC_;u7WN;&c z7a2N`p$i!fMTXxX!;#2v3^GhZhU1Xo1Y|e~8O}zAbCKaZWSEZ(cOt`s$nY>SJc0IK{#hvh9F#u~<)4r8??Cwvp!|nW{v#-VE6V>4<@cie z5Xz6D{C*TDu%iMWD)6I%!%)F*P{ATpupAYvKn1H%K>!uJhzkCR3f@Hpdr-kgsNhpn z@Hr}oqJm*mkU|9`s4yQD7NSBkDlA5YrKqq16?US+!%^XpsPGt6IE{b`&q9Tlp~9u8 z@ETNjEh=1w3Rj@QRjBY5RQMn&+=&WbM1?P-!naW2C#diXRQNqA>_LU&$XI}kjmUTy zGEPUv+fy_y{sShK$>h@kwNS1{rrD$ZFd~x$nM#nU8kuU5$%agg$aE+&O+lvLAk&e^bPO_0L#9)ZX$~@-j!YLI z(?!U12{Qc;GW`*m79-Pj$g~WZRv^J6l6XEnU6;18OVGFGS5TiiwMZP1evcy=BttUI%Hms%qx-kW@KKA%6Th%ugWm)5!c9GVey_w~+ZAWPTr+KSbue$h;4k_apNU$ovyB|ANdD$Wn|f zhagLN09mZaQiCjY$l^wpR%B^M79X;7Bg+(IIUQNfLzel-av`!TK$d05ay_!FK$ca= zvKm>|Aj=)dau>2}M3zU8Wh=5gjx0|g%hSm69J0KLEFU6EU@x-lLzeH6wZLPh7Jq6<*b#i(c@Dq4h!u0TagQPH)i=y6o^4k|i; ziVmWpUQ`rAMR8P=L`7*-ltY1H6Dqc#;&N1MMa4C!xDFLJqv94+>_)|2RD32Xz6ljS zjf!7J#jl{^H&O8?sQ5Ef{0%DJhl=;3;vZ1)K~x+>#Sv88kBU>MID<;^P)QjosX!%@ z2&kkUl}tt@c2v@eO4?D050!MIlA}?{aj4{XsN_^s`V1<49hJU;N5TDh;F3K2$n@N{3MCFe*)<(ljb-KxN0Fvc;(Eo&YL)2$gL|WzV6qS5et+ zRQ5h9`xKRZiOPCVSrC;CqOv$DOQNz7RGyE@3sHF$Dz8Q5HdNk-%9~Mn3o3V`axW@B z5|tl=%BP|7<52ktsQe^U{yS72I2DzjkIH|K$}d9Y3sCtYRK5h2Z$;%hQTYp~{AE=B z3MwB*6-B6`1XYxwiV9S5DyldiRs0@RT!bnXpo&LO#j~hlC#rZ6RlJNUa;VaTDlMq8 z1XY%zK;_w}asjGbh${bxDi@>5D^caOsB#&qT!AWAqsqUc$~#cyeW-F9s@#q$pGK9> zp~@Fft;pJmtbSxY3|Wsv)?<)$8nT{*tfwRE1;~04vR;a;e?Znn$a)pBUW2TELe?9Q z^)JYJ8?tUj)(4RF5oCP~S+^tW)5!W<09ju^*4@bZKC*s=tluE(cgXrZvi=8I4{jsOn@?bqcCF z6;+*!ssi&+)g7qn0aW!6s(J)fZADd2qpD|7RWGVapsFOQN~5YQs?I~z1*m!|s-A_a zPeIjlQ1$7k`aD!UA5}kys-HyFPowJRQ1$bu`gK(O2C7b=8c>Y^1!@XVjST zYM(;2&!XDrQSD2pb{DFB4b|>OwQr%?{irsKCY7K`t!UC*G-(BzbPt-e4^0|GlQL*h z4owb$6~1J!k*x57*)B%5g~+xP*{(sh<;b=Y*=|I(HORIW+14T324s5(*|s3tHe}m@Y)>KE zPGox#*>)k@+sO7AvVD$hKOx&M$kvN&VHB|SAzKXD;>eanwlu0Yqxxc0UyAB0P<<7u zuSNAXRNsi|n^FCARKF0_FGuy8Q2j1c|2?XoKn)G3!HXK&QNz)w;aJo#9W~5A4Kq>0 zEYvU?HJnC34Hu$@i&4Wu)NmPU_#~H9m?OpZ_)VB5K@) z8ec<=Z=lAvQRBO)aSv+jLzBzUE^J{C34 
zM$MO^=Ic@OM%4TSYW^n*G=GemKSRy`Le2Y7^Fh=cMa@H~Ig6UdQ1dvl8=tCN zMD}WAuR-=EWOpEY3$pu>{ZM2-3fZS3`)`r`cx0c2?6Z-54zizt>`RdSW@O(=K=z%; z{sOY^M)tRm{T*a~AK5=d_AinB8)V;y?E8`Z2V{>SM-6hgkYg%x%tDS+kYf&VoPiu? zBgb6ixEwi_A;v_(BF6^gcnCSRAjekZcszg{PawzB$nhF->_(0^kmF6{_yjpV zLyqr}<3GspU*zaPju3K0kz)WkhENNrB@eX}q82l1DMl@&sHFn6RH2p@)N(RvIUluL zjau$REia*#gQz7ihMX4UEJn^!D&(w1PA_sEj+{p#=P}4R4LOfP&J&Px4sy;% z&I^$9V&q(ioR=Zz<;b}dId4SH)yTO9IoBfRI^ni z&Yj5lCUU-wobMv%9_0K8IX^|t?~wBVavns^AaX{KvmZGJku!swqsTddTn6MSK(0fO zs}{MO$mK?^cI5IQS2uDUj$B6}*Hi*>%|Ncxk?Tz4ItRJtA=iB5x)8Y*Bi9wkbtQ6L zgUk!u}ttw*lgk?S7hx(~Vjfm{zE*ZasdfZUbH-GtnB?)}L9J#zny+y{|6h}?b19f%=!9C<(R5JbRJn7vvd0t@)_cjarXJttX<^Gg0f=sC6!C zJs-7RfLbp`t?N+hJ*agPYTb-lA4IKNQ0rFIx&yVoj9Onot#6^$cTns5sP#kC`Z;R- zihx?bL9P3cHxGGj$a^UA9)rBcBJXtMoq@ckBk!5Wy8wAFMczLk?;_;A0(qAr@3qLg z40+cg?>gjNkG!`d?*`=Eh`gJScQf*CN8Wdl_jBa^1$id|sI39DwW78YP}_N^Z3$|- z0kz$Z+MYyh&!D!QsO?46whOhrirS*6Z5XwsP}>-42es#+_CnNNirVW?djo23LhY@n z{Up>r8?`S+?bo69<*0onY7g9u+W(5$Z$a&MqxLPR{ZZ7u4Ylt;?a!n34^jJPsQnkz z-iJDBP)9xLn29>hLLKLzjtf!80@QIS>i7fdScE#RMIFmf#|qT33U#bT9qUlXM%3{j z>UawUIzp&p2z4Y-M+$XhP-hwHY(SlpQKuJmcA(BK)OjfC{0-_n5_Qf%oikDAEYvw0 zbI|dKDC(R*zGCD%1o_I5&x(9C$XADa4&-wppBMQ$kgp5*rXt@o`bsdhnjzV42P}gy&>jczwF6z1nbuC3*>rvM>)b%>*dJ}cMjk?}NU3*a1 ze~{mR{D&Za74kPDzZdyCkiQH04@Led$Uh7DXCwb<$bTjZ_|HN9xyXMp@?V1dmm~iY zi*aN0{P9x+5i9m delta 26572 zcmZtM2Y8g#);|1S)Qkre2xXoLB^X*jrT5;XND)O)N(c!M0v39&lSdS+h@yn92uLqd zRYVZ605%X%dbju9j^}^Hb3DH9`+d`{UGBBkz4t25erD$F6_JC>A~S4i{2W&E!sWWBib{X5gi^K5gi#F6&)QN6CE2J z7abp+5Y3I=8+|xBBRV^p7hN4)6I~l!7hNCS5PdefF}f-GYV@_}>(Mu&d!zfJ`=f70 z--#ZKUWi_dei;2I`f>D==%>-oqMt{}(EAczIYL5ZMb zP%0=LTpg4T(t>M(Dna$2VbCaOTth>Y&&D>!o{K#n+Zo#xdo8v<_Fn8z>~QR4 z>{RS@?0oE#*q5;%V?V`yPL`aId`a>Z$;FdPCZ{HsPfkm|Ci&XrYRR>e>nAs!HMn5Q zgms(N7ySN_#6Ia`XY}5*V&P963-vku)$tp9POM$0N@|0KH8-vOWO-tV-Zy+XU}5ix zH5=AySg%TIgE|c-_nKI%Mx7cpXASx4@uI!H|DbKH?|ViWyFA{mz@JbZia zi8X81t5YSlW{p~PW)=7~FVcHO)66fPnf1)CT}ozNK6Bsj^ocd=HfT^KwPC}0lY36A 
z*`Q&~DycPQ75nYJ;=LCa|EYAhKaJF@TPNR0-TIr>|CSg{%)I20Ly^4alM*UMYBs7_ zE4Age3rPvJE~`-R%YqjQ?hX?dhlxwV#ARXP@-T5_n7ArTToYdUOL*zU@Y2P3jiL!< zXC+>&I_v(6Mf2uH6O#Yz>^)pG@2hA+azg!C-=vk9m36q#tmn@b&ubebB+qJdvg)jB zPA65pA}%|(WO&K`T<<9$OJDt>HU1n+T%GHn0&)zD*2yiAnbiEhYQ8>~ls7*}C|Ioi zzmaSIMmC6$8Rcc88a|^7Iu0qYs6b>AO5S<%+I)Cl+qVuB*q6?#oqEAMjiY^Wl_l1f3!^Ahk#J9r4x5LDD!o+vO z!~-j$OQK7o%c9Rjmq%AbS4LNbi3h{P6Jg?~Vd7_D;_qQ3A&guVMk-|{rS#}Oba|!H z2?_1CM0eyiSsvXQ-4=Z=`h4_-==SJ~Vd8sX;-N6{aF}=`Ong5~{2)v`x?BZSqq|hF zKl+NQxk-OI@>p(CZoM$^c;1kdgl}(uJGb#ue{I--+{RD+weTQ%D0(8_%;D&f==;$R zqDQ00qQ}F;lVReiF!6MlcqU9d8z!C$6VES~rs@>~qi3RLqvxXMb2IYIexX1@=jfNcJ9oXM?~s85t{*yj$fz*!!-MW{XY}jn zH@!M{Z9gP?z__jhMm-(<*3kdaPf9rWAMqc;#83V$@l*8Y%%mcnyWTi@RNsDg4fsoR zN%WWKuZI4Art)r1O-RZsUpgT<-hSlB(E~>Qz2-lHf8;h<8U0rvNC++oE)6aV5`#!^ zd6@WlnD|AQ_+^;*Rham7nD|YY_-&Z@U6}a&N>zg^f-8foR1FFRg;foT{JG6Pgo!_f zi9i3@{v}NO_0Ps{nMuj*TeoR5V#Lr9ZEwFT@7L6Xx8f;5nK1FEr~X_^kQ$Wj{oj>z z9WZK1kQ$UT^w!Q@yABxjr=x<4!VSqf8_NjpHTYJ zTZ7y38dpjvAHOQ-74-fakr76&{F_%+&^NcqQ~!f#c5qj2ljXsHASW0Y3<_=!?g;J- zBL%}qp)gW7j3kATB4MOx7%8?q7_4eAG#I9;s=>%GQv6R|(J)dbzmd$J{=!@2H`98`2U6mpL4~pKs^i zf%HKbsq(j-VOmObM^*%TsMymg(d<7roHeM2Z6nq?f5=Lr-k=kLT-e2NZ z1)m3B{kM$&S;d2|gKxq}%`j3c@7YQT6?+6f2EY6t#IM0`VWdtNshg3Xv)Q+g8akro zhyi`ex419&e_teGVwe7J+1O>V#4u7nj5NrrRyiT9+ZC~5xlLBYu8dt3D;O&jD;!IT z6^RuMBaOmH<1o@Bj5G}+&B93YFw!E7v|OP=EUJRESaPgHtYoZI{w1(gZc=VW7#SEw z#)pv!VdU<-m6a2g7fFj%&TX`8YbY(&IE-}2w=^K{{c94kvs&h6 zE{?SdBOU(|4q|O$H|9239%~zG7rPm0i&c5|#ttV^tG!LP$e*D%s8jAVt89)G^F!%P2}H~-p%HAVWwGGkc^#QMhi#rnsx zV*~PfT$ga`6&YdVwlH#Q7|G1rc3nb~7I)-kE{WY4yDK(0jC2npy~0TPK?Pz%W5Z%y zdbQ6UFl5y2IkyiO5k`83ksg1a7XGZQu~D(nc{f%`xcbtkV`K6a%gN=elqtA zBPIV*GwIK6@4TmKCKNB8TR1aU80iyU`p3UjOpV=>SLX7BAo1zgy@vkTEb#A{>9PCr zKChBcHR1lel2sGxCq5K=G`GR5;hzMxV~@pVsTzACjPwm7{r*gW{^eV4&B(m`(IH>| z|DIDP&-%LXHSvGP^@p(q`3_hZMshNficicxG}oA1C3Rx`26d{W)~Qiv^5nn$vN*Op zx5={D)3GJ7rLkqPXTr#!Fmihsxg(6+8Ak3}7SxNajIBy29$OPegpt8vWN2=mFfu$d zsc@&huJd{5aYYMuz0Aua;2d%I&co`2#P8 
zkzsisR!b<~X?JYTfAcZ+Y8V;upL~qHk=yv`*xuN_FfuZXjQNY@Rk62X@BW`;IS@M- zMn;8^(RsI4PbgdJ{n)Yp12`T#5k|&_k#TvCR!_J(<818Q|Ef8$i?NUX=J0VCnfMon z&+<8Z9{VDUObR3Mzd3vz`|kgk_&)YS7?~VKrsNf@kx=@E*srmF{vX62v44e;+%OVm zCKYbqx=rf={YDQQIN(1eVaUG^f`^hXO}-%o05y>ove{?Q~aJV@?aQwIM?CWf72m3njGY(gS0U6 zKp46A-^q}h@t@#GE}fgXBsnFyO!C!XWLg-R9!Bo_E0VkXI}85P{XcP@T=~!0`~SyP zE^l?sgaRG^N67y#va%C8=f9JSCfCZ9zn+J~$U}e8sGFO)I5`qVrv8}>$qkYlW+qk2 z>su?K{8gQYW)FBexl#T>?B62=^GE)TYm(eFuTQOn%9l1XG{2FSx4u?F@k?7Kx5|61 zRzjtoeJMDQf}*SDfk2h^C&o%f{Q5l6a|-1@C6EXxi zuB1>w3KgbMk-TcP6H3MFP^cb-8c?Vag_=^R1%bb%suDAJT7H&CPtMfy|Z4vLJV$Rvu~ zOOXdDGLs^+DYAqj%P6v(A}cAfks_NZdO1alQ#7DxGDS;Lv^+&CQgkpyC&Vc_iK0^| znoH69Df%Eqk5cqQihfMdPbvC2MSq~^PZVoKv70E?g<@SPb}PmDP%MjL>nOIJVmm0d zlVUGZY#+tmq<9L&ucdeuidUm}4T{GbQoJ$6r%?Psia$*8ITW8q@dXrLMDeF6{xrqc zQhYtdpQZRFif^I#Hi~~lv?$S*M28W5is*|(_Y&Pt^lhT=5`B;8VWOW9{g&wW#G}6u z{f+2939cZxil7KVF@h*TNrKV@WeBPeG$&|5(4OEHf=q&Jg5d-s3C0mjAeclD5=FsNYYDaxyd*9-L~xwoCt`?QLM)Nk<;0SSl_OS#SOa2>h&3VBj93d|j}n_l zY&o%NGyNNh8)t;BW^+ez$YVy_U}PwXJEd|Uq_xm29w3M8kId=1Ihk=%gfMkKc* zxjo4pNxqTfn@H|La#xbGNbW~+Hpw|84>57z2qjYUb z*QInbO1GeND@wN(SGp~wZ=!S;N_VC7t(5Lg>FJc-Na?er6ei^wQtFY?fRx6hG$o}4 zDXmDkjg(AM`jV1E${6_l(}(I){wG=l-;E4A!RQq z`$>6=l!K%kBIO7vr%Cyklut?df|Rc)gEE&;<}%7eDDw}>q*ErFGQ%k|k}~5cGl4Rb zC=*gyTQH)W)PX zCAB%Jok;CWYFAQkCAB-Ly-DpuY8I(ONDWDSfYdxv=aRaJ)Tc;&n$+c_t|WCeshdfC zm(-)A9w+q_sb@(2MqKK5r2ataPo(}r*_M>;K-un;?Mc}z%J!pdHf3`tJD9RVDLb69 zBPlz9vQJWWJ7r&{>^{oAN!fQOdw{a1D0_yoUr_cd%6>!H?!j@^?HFk%NION^8Pd*CsV0>gQK=o3+EeLfD&0b*ZdAIBN*Qq~ zWm2gxmHJcZPAbi$(i|#nrqT{7?WEFfD(#`t0V=&mr888@H*kSUA5!UKDt$_&&#Cka zm42hrKdJODDko6+QYt4>`C2NsrSf1Z&!Te8=c#;@%IBzjfyx)D{3VsY=Gw7bJDzJN za_wZUjdN|7m(w)il>!&I_9L$SbX%il3D;c`f0Qb-sIrABFHvO|RX(7~S*o0;%0HspLRnJoOJk>6vT0yE6rdn01 z)uUPi;???7?JlYfq1qg(EvDKMs(nbcuc-D7)l;aRM)k^6&!Bn^)dx}iL8?Da_1RQ^ ziR!OYeJ|C&p!$zg|Ct(H>lJFfO06%b^&_=@rgm{^r%?N9YB#6$ z4b<*H?cvm(Kf3AMka_V?5&MxD~siI<^H6Y8{~PCM!h zqRw#YjHJ%P)R|44In>!joxRlAPn|EQ^CNYBrfxEI%Tl*Ib#J8ZZPe{S-67N+OWpC* 
zT|wPvsk@1~hpBstx@V|&3GsSYQm-KO(x_LRdNrxni+cU3H-LJRsW**!_fhXj>aC#O zD(dZ_-dogrhk8FzAN4Pxektl#pne+l+fn~!>fb{BG1QM!KcxQC)L%{gwc_f(PW^YO ze~|iLQa@kWFEmJ@K^hGz)1WmCZlu9YG#EyM@idr7gC#UrLxXiRI6#AAG&n(n-)Wdg z!^>${mxj$~*n)=HG#pIBp)`DihI8UHoJYfLG~7kQ-8B4;hWQHqpiu=HRi#mN8fDSw zb{gGDqep2pk46h^$+G`^q457Ick znZ`S4ypzVC()c?X|3H%{P0G+Dl_ni%(v>E+(quACrqSd+nyjVCR+>CVlT$SLh$f%V z^h%l*r)fabTWQ*drdc$7lBO$Yx{9WUXnK-()6+CdpxG5PyNYJjXjY$Q4QbYmW*Icg zq}h0y-A%K5XttVWn`yR{X7AJN49(8b{0f>Eqj{9(4QSqy=GW6ahvq|RKAh%{(R@D5 z7t;J?an1M9{7qUU(V_$`O3@;R7DH(Xjy=kMQK@_mfdKXLCZ{9uBGKx zT0Tds%V<@QR)uMmL8}~E4WiX1TD?fCmuU4pt^P@?f5o}J8P~Vt`u1G^AlE<6^|QJD z0M{Sm`V+KHrS&zmzK+%-X+4S7Q)vAntzV_}>$JI?Hc7N8N}HQ#(}Om>Xfua4i)pij zHWz90C2hW@ZDrcVYtpthZSSP*DB6yp?RwfiN81-@`z!4(rClQJn$zwE+I67aOxn$* z-F(`epxs5HS6c%Swuv`?dbW#a9}&^}K4koG%h z{~GPzphE&3uAswJbZAb88|cu14)@Yw1|4S7;XOK>pu;IT26Vidj%DeXO~=7>97@Oa zbbOAEFVOK=Z_==2$#zTn0V+}M>HZ{@}X+_;Pz zmviG+-1sv${z~UobnZy!8|gfi&JWRf2AxmS`C~eN%1t%7sS!6d;ihTa^awXS%1y`O z+;pCsE^_lV++2&B>u~c_ZhnZHXK?d7-24GIAEQeOUDD`MnJ(k#64GTVUG~%E5M7RN zODS%tz%6OqGKpL6<(BE(@)oz`&woJIWV)86YrH&Nd(gEnUHj8@HC;E;bt~O2r&|); ziqfqE-MZ54R=O>u+cR`qLAMKZ`+{y?acdoJZOX09x%Cllox`p3xb-J)!)=#vTROL8 zb6XC#tt5WiMsC~8ZU3Zugzo>Kdq=u=qx)@ipG)_r>AsZi=jr}A-M^$qReIE;M+16{ zqQ_)<#Obk<9&gZNA3Y=VEKJWL^t_p#J?WWF&xQ1ShMp_L_57BeztQub^lDG9Tjuaf>c z>HA23kMuL7pCkPv(mx^nGt$2%{aez1ApH+|m!x-jdRL@(WqQZ2qjwW}H>Y0AYdT*om3-o@O-h1f%I=%PN`yjo4A|ru}6f)Ats7%InWK<)g zF&RzC=s-p%GCGrSbAEZhg^X@w+(t%!GIGecos2ul7)-`cGKP~efs9FHJW9snWaN=C zFRxI?gsbDvk?{f<2gx{2#z``Mq7V99LZ349sYIV^=yNlDdeSGIKI7;U(q}4t7SU%p zeOA)vBl>(zpKr;mO=c4^n~^!0%xPraN9GbT*O0l6c;i_Evke22_q zWS$`N6q#qpJV)k5GQTGCJ2HPH^A|FIC-YxqRVS+>Sv|<=MOFq`nPl}NYXDh8$Qnl0 zXtKtUHBnsFePlgA)+1y+O4cm0=8!dytc7GfL)Ip;wvhD#Suc|H5?Qa1^(t9!kad8p zFUb0lte?sHjjTWDTbRB@>06w>G5VII@9p#*LEkui!~Bo?==%VDAENIg^qo!Lx%6E? 
z-zVw2hravh`yqY5qVG5K{hq!*(eE<)Md(+Ieo^|x=vRV%rRbMJzpLq2g?`oPSBrji z=vR+^4d~Y>U*>xHwV_`x`eo2Bi+=s{n%$Uibv&fsRQk=Q-%|QLL%&`0+e^Rw^!tc@ zU(@ef`j?=8Ir>+ie@pszpnoU&52XJv`j4Rh2Kqlw|Lyeuo$N%iFDJV#**B5hh3vb? z9!>UG;@RWKzL)IjWZy^j{bbK3dk)!4$bN?Gm1M6Wdp+43$=*TsF0x-C`!%xnlKm#x zzcZi`18!r$y$smGfCCIT#eg#mIM09&8Sn`MJ|~B`oC4%rNlp?u#mEWBDNjyCa;_z( zDmgXCsY_0MavG6y13BsBWRo+ToRQ>=BWEHxQ^=W0&b{Q^N6w?oBk-1FvUb2L^Ux;7ts?g@HX8*qecw4D83i+Zi~6fvXs} znSnbPxRZgq8Tcv#4>Ir&1J5$>0s}u{;HM1yf`MP>>-&yD7<37P5*c(kg9qT~F@4*{n-YGzB)*Tk-sG+? zx$7tH`h~$o7+jLUr5Rk0!Oa=mlEDKRJdD937(A1~a~V9J!P^+Ti^01Ye2T#zG58aP z6k$k7hLmPVQ--u<$PEn1VaQO13@1KhK0}r=w4C~FX0Sp_+u#jO7Fzg|Qt!CI} zal^JU>@dSlG3*S({$Tj!3@^a&It*{h@a7Ef&+xk#K7`?qGJGDx7cl%)hQH15cNzW# z!+&J>&y0vNq6{Na8PSpv9T?Gx5&ao)7bAu+;(<6LW-;OkM(kk3Ym9h<5nnUn7e@TX z$QUD28Ci~z?HPFsBfBwjC?m%)asnggFmf>?moV~WM($(en~eO7k>4}&M@AKAR0^Z6 zW>gDCwP#ekBclc|Y6zo-F=`H@7BgxIqh4jy+l+dbQC~Ca7e@WY=!%T4#^@T1Zp-MK z7~O@@xs1M_(GN0uEu*(G`Z-1)WAu4OUu5(jjJcdK1sGF{_?X6wY08*e8PkU`S&SLW zm|Vu(&6ov@S;m;d|?~M6_u~##;GGni0Y-h%HXKYW#PGIa*#@@@= zCmFkfv8x#S8e`vK>;Z9Oe_-q%jKjFnjH}4FN{qXKaa|bKm2ndpcMs#HF>WQ}HZpEA z;|?+IB;!sqz98cR#wRnr3FF%^z8&KSGJY82M=<^o#?N8=JjU;5{GmAGk1+msCL}W9 zawarjLQ5uG&xG5VFoFrAn2^VWrcgZgCf&=V8BChVq^(Ta z$)uN=be>6{GwDkvS7mZNCO2U6SSIH(`EDk!Ve%FxZ)5UNCZA*S1*SxpQkeLZB21~m zl)6l*&y+4q>BW@ZOqt4*hnO;hDeIZ?98+Fk$~mTd#*{CJClN1^|8oxUX2jd&-;)u) zgZN0|qlwQY{xtEW#6Kqf4e{^DZ6+?a9l7nveSq9q}S#m!m_jAH3gmnq)6W&WW zgK#F{3BrqnA8~hG?rz52Ex7wZ?tYxRXLI+P+PXhN`$~_&qryKX&#yyX7&qD5bl6y{b&&S;JDfd?5-WuFni+hK0?|AN= z$h|wb_ciW)gK3ynfN57U?RutlVp?aWO=ntsCet2c+CiorXWB`o$C#eV^m0re#Ps1z zAIbF1Oy9xuolO6g`!40aMDFX#eZ9G_5BDwQzO~%9p8I~_zCXAh_jl(0?%dy#`=8+c zMcn@s@%xW*|2gizzymQJNacZYJdnWyIXp0k2UhdIW**qe17GsMPdxAo4>sY!Haysl z2dDGkOdfoU2M_Y#aUML$L&bQgG!K>Gp@BR!jE6?>&7KXd>s#0<>660 zJeh~%Jp4Kjzsti1nNgA%<(W~D8R^W(W=0M(wlQNDGj{Vx0UjyJBgJ{7504Dwk=uD> zBadw7ksZuTVCEIfyo#CWab{*SGl!YWnYn?P8=3h7GymXGJbE*a_TlqX_5F_0&Q@x%z8c!wuG;E7|* zuFdQw%x=c)kl7C~`ypl@X7(v&pCPY2@w_VJRU>aWc@xN+MBaAt_K^1)a}t?Th&f5j 
z>BO8{nbVy)dCYl=IZrd^6mvde&L_;R%-ovHtsAT;}d$?t9EV%)GM9yOw!X z#LXMbyfMrh$GpwV+rhk@%sa!pFPQfg^S)-@H_ZEk`Ivtt^9wP*2=j|GKgRr$%&*A& z%FMrx`PGj!4eic%Yr>Dc$Ecjv*0)jPO{(}3of$YV;1BS z_?iXZu^|3C3zJz`mWAb6n8w0uSlEb#Oi%PPn0*k7$s5YAmV2l3Q4k&XNq4jAqFcmgKTz9!r+6WEo5Lv*Zv< zj2)pc0ijnx^f&SdrdtbUBukF$Cet2eQF3#%`%`U_Tn#hN0lDao4BtZBxYcC2a7 zntrUglQn}`Gn@FDCt0(YHP5qVH*5B=<~VCEu;xS7Vr>D|Udh_ptZl;DW~}Yb+AP-g zW9?MdKE&D?tlh}k?X2Cw+Ec9kh_#=v?o!rW#kxYQyOwpeSyxxwx}L1-$GU9R<+AR6 z);-9&HLTmhx^1jG&AN|S_bKZOvOZvaGV2?#z9s9gXZ>*2PhkBd)<4Dim8@UQ`u(gw z#QGzw|Bm&)v;Ge@RA)njI2#(VA&U*Sv*AuQEMUVjHY{huTWmPOh7Z{A8_!VPwtjWfB7dG}{ zV{bN2W8))ie3XsPvvD^Y_ptF3Hh#;-@7Yw6P375CkxlK`bTgZ7Vbh~*n#ZOEY&y=S z3vBw3%}H!7!RAtIZp!AiY`%fbcd$ACkh*y^n`g3lE}Q4Gc?+9gV)HIGf5hgm+59bA zim|0MTgtGdEn9A4OBc3`V#{Q<#M$yRTUN7WEnD7Z%lm9O%9ek!HNw_^u(cCgZ)IzD zwmvOx>uR>HW$Tx0{fVu=u&oW-IB(Vp(3imKUq=Vm2=h z=Eb4BxSSU^@Zv^Z{EQdl-}B;+?5M|%=Im(6j*0BJhaJ<{@g_SCv*UeUO6H}qyi}f- z`tZ^~Ub>x^w(-&~UfRvhf3UL%JBzWiCp-JGGn<_o*}0vaJJ|UPyDnkZWyE(iWmj8v z-N3Gi?7D|t)7bSgyY{i`Onh}}ncr3|lB68FkAyfTef9^sWodF2CMIm;{O*>fFx>aeFC zd#12wI(zPC&rbHd!Jd7*T8dXI@M;>b4(8P{ygH6ock}9gUVV$#0$#hC*UIwR0A3ry zYr}YLXPnpG;I)0cekHFL=kR+i^Sr&C zw}0cE%XlZkJFR%9Bk$bEI}h>B6TFkhJ4bov9PeD<-DZ_MAD&wGn_?2GaNg~ zu}?YnImf=@*f$*em1DniybZ^1;dnQWcjtIdj`!nuHpiE9d^5+la{PIYZ|C?Pj=#q7 z-#GCPPF%r>f}AMKiDXWcD{`_DC$HsX6;9UWWPMH!=HvuUPU2*olOZP`;N(M`+|9{%IC+4R`Ff6U^0c^< zXE_ycsywGEa;h??uH{s1PSxeqcuq~@)P0>G_=gfistI<}%J)&Y1$7 zDax7RoLS77b)4D2nN6J8!kL#ivx_r7b2gE)mvi-o zTgACeoZG^=A2@#r=P%>@<(x0T`J$XJ&iM|U@6P$2#LxHUd>_sa&rO?>#AK35CYjWkOi7!` zL|qGrqNs}%8|s3JU2#!d?5;XC?7)Jo*b9gau&k~ksH-9_ie*t(1iMHwCCMZ+nNE`L z;rso*U+#J5+4}A-u?;7a)8}z*eeVd_g5c*O<=r4r+BIqxNemnFxLcbgO+o8V; z`u)(~3;mOze+u+Zh5l*Ke>C);2mSXz|3lFK9`yek`nN&9|&0>WQUL!LirPlWL45WWDy7eV+62>%7bS3~$Z2;T_dJ0N@)gzts$0}x&U;fEo-62faBydJ_E zAiN2}A4B*P2!96QFCqLLguf?9_$LVe3gO=&JP6?wM1V*+L@FRs2ayJdG(*Gz5f?;! 
z5D7qJKZqOvk%J*}1Vm;){M3zD1 zF^D`1k#!K+0FiefvKb;+h*m?i z7NYeKZG@;BqU{jvf~X&&y%3!Q(cmLA^ILfKY-{*5dAkqe}e%N4A@}cFc_E) z!GV)u;2an@7Y63Sz&se34+9He;2IdX1qN<|fjeQ~ZWy=^1|Ecg$6??_7+3=XZ@|D? zFt7mz-i3kpVc-)O_zVWVgn_SM;J+|141+cpbi?2YFnBsa1}}iY%VF?F7`zDv7sB8@ zFnB)=qLubO! zY#2HRhR%bb%V6ko7`hUMt_s4?wJ@|4hStN-$1pSmLurT!VxGUM z43EJGFj5F3B`{)wkt!Iefsr~GX@HSt7&#b5&V`XHVB{efc?w3>!pNI2vI#~u!^jsf zvNwGk`36R|!^ln;*##qgFcOB50T_wFNCx79xEbO$h}T2h32`^X+acZsaX-Wlg!m+g zPl5PUh);v~@erR0@i`Em2k|Q)ejUVbg!s)6zcmQ)J0Si5#Fs#P8N~k%@rNP)48+$# z{7r~&g7{{Le*y7r5dQ|^+abOa;=3T;2k|h(2Ou7Ucm@)Jgc%YxNYq2Z2?;kO+9A;e z2|px)2SQ>JB&I-ODkP>s;&@2Rgv2?J_%kFfhs3pzxB(J3?G-`dc1YY0iN%mu3W?>A zSOJNrA+Zh;8zAuxBsN203naEe;tNP@gTziq{0NC(_J)AFAkhbj7$ovAS_GpO7_ElU zMi^~@Q74SLVYD4adtr2c7(Eb1C&B0x7(Ek4gD|=XM*j(;@4@I+82t`Lcf#l{80~}6 zFpLhsXbeU(Fq$XG=r|+=$s$OWL$V%{PDu7ZvKNw*AUOq+Qz1DGl1D>w1|&~`$rX@%6q2hT`5Yu)gyh;FBwvB#dPsf*$id2$Ca^%t3MtQb4K@QYDb8fK(%-ypY--QU^flFi0H%siPn@9a6_Z z>NH550jXJ#IvY~wLMk{HQrAN2R!A*^)DlQN45`N;^(3U8fz-2*S_i3DA@w?>-h|ZK zkopEvc}SZe-3#eSkp3g2Pl5CtNS_Dk%OHI@q_2eZRgk_G_7<^g(36hx#nPVYy8f4Cc%mt8{2buYhSpb=9AagxrZiCF7 zkhvQ&_d#Y6WFCUdD#*MHnT?Qn7cw71<`c+#2AMA*^A%*ihs;lq`4uw1L#7`x!;r}a zAzJ|13dmX@YlmzTWLqKI23Zeedmy_nWd8u!gCIK@vPVMp1jwEa*$W_h5oE7`>|Y>z zHDs@Y?2VAU1G0BP_Fl+70NEvweHgMUA-fi`!M7p15waga_TP~G6tbT~_CJu_0oflQ z`!i&JgY0g|4nZ~#xnjswLCy-fCdjoyt_^Y?$aO$&U&#FdatA?fGUN`0+#ew~6LRN6 z?n2024#C`&kh=Ku{GE`$8}fgH{3DQG3Hetb{~F{s zK>l6GzYqCMkpCF+Uj`xn73BX5`R^eAJ>)}>k3fD9@-fI~AfJOhcfprBSHZ-!FmVG++yoQ1APNwr zh&qHF(S&G4v>`l*BM`?SPC%T5I2Car;u6GVh|3Z45o;0uM0|+&2=Q;krv%04i2oo4 z5J^NDkwc6j?So`NvLe}#>XB9>y^ORG>0P96k#->cfb=ucZ%Dh5LP$}hA*2zcQKU3d z4i!|Rf?8BiA4CO>sGtQEI8lKc6||#*E>z%01-+v@Et1n9u@qE3ae0I3o3M=LKiCZpu*Xx@KRJ5ybKjyjtb|a z!e>$8YpC#bRQM(;T#pI|QDF`h?m>kUs7R>j08}&$6&;0&rlX=`QBe>TJ%EZ9qoSp# z=s{Gp1r_}l6>UdFJ5kXOsMvyvn^AEqDsDr;VmB(Dg^Djh#dA^dJXCxIDqe+(Uq!|L zK*j$=#p_Y=MpXPBD&C2TccJ3lsJIUmCs1(`6=zU!9u<$H5}}eNRN_G;?Wm*+mH1J~ 
zG*ogrDmfRGoKH|C7od`hQOR6XG7pv9fJzpjlBZG0Dpax>mAs5fHlUJ!p^~krz=IWnw3hDVX%31oN<8D2n!HOTNXGHgPI&B*XEGJJvzpCQAS z$nXO){EQ60BEt|e3?oAV8B)lQMTR}dSd5H812USCu^btl$ml{wH!`*(V;3^|k#RCI z9*T@pk?{m%oPms|AmdDAJQEpbBjbh0crh~0MaFr^cpEa_fsA(|km*Zg z`U;tbkSUK$W5^6L7b0^BGMkXOHi*o1$lQR;&B*LPW*0I~Mdo9W`8Z@g5t&a$=8KW} zQe>Ws%=3_WJ~A&r=Di-bBlDfeycC%qMCON(`4MD(9GRa&=5@&Y7BYW?%pW83R%HHS zuXY9!8}`RO&;e`=ipsQRy^PdMqkE0hOMFN>4?lvry^TsPtS^dI2hZ z2$jBuN945tcU0PsN+YN=jY_kq3{+N#f@LMB%!JA+QCT%At3_q? zsO$(-b|or%9F?s`WzVCsb*O9uDtiZ&Z9-)qqq0v>*=MNiYgG0vD%*j|en4frQCSF; z#ZlP=Diy7_RrI6EGE~`%Drck0g{X2ds$798 zpF)){1X1N0RQU#~+=wdQN0r~8%5PESZORQWrq>_e3aRGCDUB4jZjiy2wUk);w@ zs*$A@S?ZCc9a*}N#g8n#$g)4O9EdEFkYx(89E&W+Bg+hA37(8BvykO1WO)!-o<^3H z$g&z)o=28JWXU1R9%Pw7RYFx$P}Q-h>UdN&167@js+OXvCsEbYsA?6eT8*kAs49)B zvZ!hgsv1YtN22PJQT3^)`b-q8o{g%{LDd(a>WfkJTvRs*a-SAyhqrs?*2{ zvYH6WT8pf8$l8Rgt;pJjtnJ9!g{*#LJrG%^BI^mrIs;ixL)J5pbr!PDLDut;^&({b zGqPTdtT!X;eaQL%vMxo|<;c1MSszE%r;v3evaUhaH<0y%AhK>k)-A}o6Mf|=it25s-j3>C!zX7Q2n8({%}-(B&z=-sy`OhACKy1 zp!!Qu{a;c2-KhR8RR1Ze&jyjb4B73-?n3rnWZw_j4?y;Vk^K;4KMdKYBm1$)J_FfL zLH3!*em=5ai0pHbeIByUNA|0c{W@g75!vrR_P-(fGGt$Y?2jV*6Ue?2*;ga`3&LkVgyp@uTlP=Ok% zP{Y2cVJ2#riyH1l4KJXE&r!oLYTO4kHlW5P)Yy%JjXkJwU)1;q)OZkToQxU|MU97} z#*rL#R1{n#WNKsKta@ z%7Un+2DNmcmH=uw61B`gE%Q;!O{nD-)Up({EJrOXP|KsJsfsi^f5)OrVM zeHgXAf?C(3){Ut39n`uRwSI+Kcc9jvQELdbMp5ekYTav?My(m-5ONeDM=^5jgB%v* zs6vhwf|?EJjW%a&{r-LC84?Ij11!ROFn7oJS+)naFt&a$Z7E z&P$PVE^^+AoQsfi334t&&c7q)!^rs5v3 zwhU^^p|&yP0=Wv2s|2~~kjsr+UgYXTE+29QkZV8Wnu1)%BG>WAH3PX$L9Us|btZDn zL9Thobp>+$1-XJ(BiD7vbt7`!iChbjYZ-DaN3Ipf^(b;Zfm}}`*E-~S1-br#T>nI_ z^~kjmxwatJr^xj+a(#E?rV_ydgT5q za^Hg73z7RC}6IvjBOnL7wZ8=dZ|f5AxiHJWG&g8S?xcc^*ZcCy?i9 z>bMAXT#Y)eLmfAw zj+;@(t*GM;g6g;nb=-?O9zq?jp^kS^$2Qb4h&oGArww)PhdQUD&e^E*GSqo9>U;!s zK8`w{LY*s7=W5jXJnGzmI)6i*zoX75>KsCyBdBu}b&jDfBkC$eUHhP}x*+OupsuN? 
z>nPN9A?msubzOfhBHtm% zcQo>yhRYY_agGWiG1skZzJ-(hkPF(-zMbSj(oo( zUmx;?k#7L`V#pUqzC7}cA%6k#7bCwB`D>8hhWw4l--7&39|Zy^a4-r?Mu8(x;3yQBjsnM_z-cIO1`5nV zfeTRJVicH*0t-;!8Wgx51#U-yB`B~I1(u`03KV!11^$5oZ=mx7Z=t{j6xf6Un^9m3 z3T#DzZ%|+y^%kODAL=~-^`3=#&qKX;quxhQ?~|za+5i1TPEo=CVcoRx, vec->y, vec->z); + // Draw marker dots for magnitude + glEnd(); + float particle_attenuation_quadratic[] = { 0.0f, 0.0f, 2.0f }; // larger Z = smaller particles + glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, particle_attenuation_quadratic ); + glEnable(GL_POINT_SMOOTH); + glPointSize(10.0); + glBegin(GL_POINTS); + glColor3f(1,0,0); + glVertex3f(vec->x,0,0); + glColor3f(0,1,0); + glVertex3f(0,vec->y,0); + glColor3f(0,0,1); + glVertex3f(0,0,vec->z); + glEnd(); + +} + void render_world_box() { // Show edge of world diff --git a/util.h b/util.h index 2eac21117b..637248b761 100644 --- a/util.h +++ b/util.h @@ -13,6 +13,7 @@ void outstring(char * string, int length); float randFloat(); void render_world_box(); +void render_vector(glm::vec3 * vec); void drawtext(int x, int y, float scale, float rotate, float thick, int mono, char *string, float r=1.0, float g=1.0, float b=1.0); void drawvec3(int x, int y, float scale, float rotate, float thick, int mono, glm::vec3 vec, From e5c94d6c696f6b42aa591b6fe9d423493c0ecbe6 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Sun, 25 Nov 2012 15:32:55 -0800 Subject: [PATCH 016/136] Added test routine makecubes() and key handlers --- .../UserInterfaceState.xcuserstate | Bin 103502 -> 104780 bytes .../xcdebugger/Breakpoints.xcbkptlist | 13 +++++ main.cpp | 53 ++++++++++++++++-- util.cpp | 28 ++++++++- util.h | 4 ++ world.h | 4 +- 6 files changed, 96 insertions(+), 6 deletions(-) diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 
99d39951b83814b04efb34739c4a0c3e86806a6d..1aded4aa69d1438242e9e35135534eb0091d71d7 100644 GIT binary patch delta 36515 zcmZ^p2Y6J)*T#Pf_pX4jd+*-1fh44n1jv#~NJuE5_ZE73NCAX|lF)H??AUdMfR$bp z5p0MJu~$Sy>>w(NfLKtm^F6x@tor@I2i!U5yzjhoX3p%rGrM=Q{mqB%x0=nLcfW93 zL`U2GJNov$#M{}C>d0_pJ8~U89K9X=90MIg9U~lL9OE359L0`u#}r4cW4fcxG27AT zSm0RVSn3dtD;(E2u65ktxY=>L!|!NvtaRM%xX-c9vC(nA<3Yy`$D@uX98WoRJDzvE z?0C)bmSdmefa86~$Bsje&mBh{-#AV>zIU8BrIafQ^I3ILwb3WwU?%d&g*!hU_QRj2cJavpYm<~-^A)_KbLo%4I=56&N*r=4dN6r0jUaVSnjQ6iNnC0c2( zbXHOnkJ3fyuH-5CN)KhQGDI1wOi(5&lay+uMyXZiDh*1bGEbSWEKn9IS1Z>j*DBX3 z%arSt8wT6so!R@tq*p}eWQrR-JS zR`x0HDEpN|%3R{ z=BT-9cQsEPs18yGt3%YG>M(Vg>Yb_9srBk&b&0xEyRQ*c* zQ9Z5xr2g!(xth6}yCj$5Qe7^W=4$VXamBjgTwPqLt~6J=tB0$ntCy>{YnW@eYlLg0 ztH@RCb(OeEUDI8cxn{U#x)!00I5;M(Zgr2;B*Eg>3T;IEXcKzY{Q$v$AyLO4zMssK`&C=Xj zgw|e*(Gs+dS|_cuHdGs_jnc+y+u>+I($+wnSU1 z3GE8)8tq!`2JJ@eR_!*eNn5GirLED{Y8$m}+9TRd?P=|K?Pcvv?QQKn?L+Ob_PKUU zJE{Gs{jB|=oz-Q%h3?Q@x?7LZWAzStXFXN#s^{qidVhVWK0+Vs)yL^YdWBx8*XT3! zdc9F!sC)IR^=tHN^;`5?_1pB@b-#X(ey@I?zFOa;@6aFCAJHGxAJZS#cj|le=k*u# z7xlgR+xkBJUHuFFOZ}++mHxGUOh2iAr+=^ip#N-KVq9uC440u9mfi!7-$SK1{*_+F-D;=)+jbgj8db_s5Pb<(~Q~19AmEG zGp;hOHEu9&Hf}eT8>@`Fjn&3FW0P^evCY_FJZ3y$JZV;_l*yX4~>tE zkBv`^&y1gppN(IPGsdsRZ^rM&A70~6<1e$h*}_yz)r>IPnqACPGtEplGt5jg%j{}q zn}f{3<`8qJIm{exjxa}>W6W`8tvS`4W==ORGiR7H%{sH*oMX;2=bH=7%T2Gj++1Nc znJdjz=AGtU=H2E!=Dp^9=2~;3xy9USK4d;@K5af@KC4HX-reRK=9}hQ=3et{^F8yR z`H}gtdBi+wo-uzle=~nK|1keF|1$qJ&swr&w_00mEX~rbwpKeU(u%W^tt_jnm2LI3 zdRhIg!B(L))*5Gxw~DQDtHP?aW?GHbJZrw?wS1PamRZ+ZH(2*stF1NGT5FxP-rC@` zHd>pk2d!<^qt;{AZtFQ~kM+Fuvh{)Wq4kmVvGs}dsddOYY<*@Ow@z3;SU+38S$|q* z-MA%p3wLX`+a2L<>u%?cbVs>k-Erno$1bUcXj8v^WA;iecglIBi!TN6WkNs zwL+dQy?hUxaKu^0@Q6J@#LG`41^s{s-Qv zw6{6k=BGA4``hgIw-!=!A<06zB;eb!zu4>O;7D{NIg%Y69i4>KQb;Z#wG~pdkm7{Y zK}a4Ur3ooZNI62v7gBE_^%c?pAq^JNC?QP{(qti33Te8K<_Kw?kQNE)av@zIq-%t9 zy^wAe(h4EnE2K3-+90GYLV8e0+lBO~ke(3I(_*OfoRD4=(yKyxQ%L)SbU;WS2>I=OUjAp*EPqVWrT&{fZ|Q&ffTi^b#cU484Dj2Iru$DCnO$gby 
zh3!Rw-Vz(C7c?}^sqQj0Oi>yN-5w6D3<`VgJx!|)`5q4Yrulmv3Jq+;0ek#EXl8^$ z9}S1j`j>ux<3U548wz?n95mlw@M$0kYKui7-xL1P?`Qav-j8eH4H=&FkA8ow?J9ri z2f6;k@5gD&Lgr_~95?xU9U9f*){y-<|LFJA{oOx^Z?QaNcs@wj^g*0&mC0#uYc&)9t!zi5BneSZ~rL0>5ySl zb;si&@0(%olYZxcUjFhUEt>vPZLM_2GogsR;fOu{?H~2kUJTjyh3&6i$a>VLs`f_6 zzd!7M+pm6=o;Suq-;cq0e&thD`#I!0 z6!!fVQ16dCq-cMJe4kzLIq?^KTA(|dha8`W9hdmGf0W@ra7fXetwPQ(!%jzVv7fJ) z@G5nxq0q0wp+--^T`|NdO4?hNe`65-HU{=Q#6aBgFenuYE2{kEh1 zw9uv?Nn!it!R#$Ns%avaamgO`UG0D9=t%9lkiA9NexqOgsvz|i=N-QM_0C(Jw>fWj z`i0a=NS6w!wUFAZcP@9Xz~fvgB!`fk!S6oRFTPrvy3V;dfLZU{;N0lkBqU8px{wSZ znd_ZfocH5#ZWWRxBzF)K;XiV6vHz{FV^be@K6O#t)6Qq`ICl%Fosc4fxF~&)(DCQ&Q$02?3LP`kfi!XS*^vul6g|ALp zoR$)ko|fHpY2b^X6_;W*ij zHC9Qy*r1gpB^i&>Nl3Xu>K?@8`THO5SfHdST`wvzTj_>J$rVx$A@vM`dIhuo`hg#F zgQLyP&h*=k>$yrVrQbza`YQuMIV%uSpCC(M?k2RpYu(4C{@YGysl$~q7X=q8V?*G6 zLh2s^`+J;-Of6E%FN DnpooLK<`qbKQx^)M-kcFMqu1xm@uIX|#~W2&qs=W7jJ}xgrD{C#3P` zfa0sg{t+i5a&A&?zozJn6RQttxQZG?k;89x%$txsZ5G4G6{@5W^QH_grq?)RQ zM~x8Dl|s5Ih`HMT=MM>KYI`-|q8)Wm6Y;3YLb_H+*9Ae#{G)#C;8nY*nHTLTOYMqB z?IxrfgmhyNbW-M(J!tMUdVaRKCLb&Oi5juldqkX8z5 zm5}aSuZ~wI;870UmXckk$%mU65^ky#J@uNnW3N^+iF~sMq3AmkDX3 zkTwNDn*+&w?`6+re}#7Tob;s=7iVU5{dXpBQ7`&UtS(nq;89l!>3$(SAf&)+=vKe_ zbF%kdb=^gUtXDULaG>e$h2S^ShC5U3#f!Uj%gRWJNl(uXZY~*FY3cu-voq?S7ahT0>fd-=2&vUe zLV7t!{fd9(ndDrT-F4~3^tf8P+Td|Hh4h+`UJrua2`F2mLKVtu;W zxgznnqJ{L9koE@AZ~F)QnwaKFaCN*WsFSNR9+yW*?+9ss5cID9?q55lxiVcj7uA{T z>W;^iFQoT`bT9~d-~Y?69lU*9122jhS1)CZr=m`aCG) zi(r!9TG4Yx(c<* zS?aptqM$2XSK)D8Bcx+O`X-1u9uV{G{sL*8f8+1b-Wy%FT@-w~%a6ykTu3K{^lcD) zDp;7F$d|?zEzZu#Oo>VFmeDo%)tKHju$03U@4Cwscrf^%_2*jSS{oY5_br9=!}+27 z5$WCH+ICSj54pDEaXl=g(?a?wi1|4!`2EfGYTzwv zS@11u``HBls6R&=p`!sQ{4(cy+ZW-#8&=;}XXX_Cc?k035 zAcbG%T%Z28n@gJ(*=+gkF1$IoK68EU%U|O8rLx)H#@*_ zT5{5L+;zfr()FT$<=>H!a&sX|fzg!27d&1(ExSvLoAxuE%UnOWe)RAAJ61mJ`iXk~ zzQ5ZyMPFiT?{b}S{TArsS0UTixPBM%C4oNT{|@wVR>&^U0(d2ecP_u&Hrl1N z)1m_SNFlq{XwgE}0{D(vTmT<0WIc@U;5w-#1@LANZ}>|7JxRgrjqqw7E#H^FP3xki zYH3=!mZ4>8Sz1>uTkEFfXt`Q!V993$j7AtwkqQOL#oQq!bBt1;B-*tAx%6^stO_RF1p 
zlqnetJPW4OS5lRKbqzy6;7-TLQvveI)iJh|yx~` zay;4n`t|c zclyd!hlB3X?h6FnE99It+G-)^27=ma>jR5wgOIzw$8l|wwmDEdR5|a{9?-UGW17a= zZE<>@kb4L@P=ne-+IDS+ZKU>a)Ae>+$5Hu;Jy=QFquOIdBMJxCRaGw-RnahYt@b$e z|1^Z6p3rucDdymQ{fE`p_N$-LIJ3I0VOaf?iiTemzoBi_E(`LH4)S++mzH{j_K>zi zZ>L9=Db9Z^jeZpk6++Gz@))Jrep>3$dV4))!id5yRY6B#pG&+1SBoB}$Cr#K9NySa zQ8}YJ09&WW=?T=2A5j=+g%v019WNwA?E>nI9g=ydz9MN+09P zU!%7gE##65g&f&*wxzA5N1vciy3qH3WR>V;0bHq&%h%}TLaqpuwpXtTh^iKH<$py@ z)3$2Mf}*AbMWz2!R9!$+tFWlq`rHfs>J37!K0oI90r~|(uK6#0V9YoB5(Cv38gyou zP3Tt!q+TK9scZDBggmWjdn;R9yJh-~zWlZN_4*A$zD&q7*6KIuHw$^Dkn5VhZDp&B zTduG2<*(CM=uP@cAY$4BCr{AgH)#^qe&lU2GO|vhx%^9-Rm$zPDr?1yH=o^LH zAmqhD_6qsRu!1f6g8`=d^#}B=LT(iDJR#3tr*G3A(zgqFfshvpc~O~?*c7AKqML@c zwk_?jOMm8nL7vrj>(2>!iIA5H`SPZ3Tiap^Ue;g#U!ia4Z|ZLe*(YQn=$z3&Jk^F)0_2vPyfW1zfr$me_#JV->QG4e;m|$m5{F%@_Zp*Bjjs0 z>Yv(l{jmO-O}FVnzRp)F!B^@lZHiKCu}u+*?K3*!Q* z6o{A*V0tjfG}$N;@-`tq^be*&qr#~AkEUv)M#wva{BV5lt8NW0xA@-I&I*1wKYOi> z%Z$4J!0L@zLVi@pk2M|FZEXu1eR=mA^Njh%0%M`E$XIMFF_s#aw>d22okHFvpc5LyhU*$EewrqB7YseYCu{E}z zyPHxXj7LJw@U^Y6^M77Dwb7moMTReOjc0>2F$XU#3f_GfdyLn8`J0XBjTej;jhBp< zjaQ6Ujn{9wo^8h5W9N6kl^8 zzvEMcd|>Bpx2>(MrL}mmCp{%5Gpp%Hgw3iw32`70rRE#zZD{zk~hg?vKDCpVjkW|Em~b~HPg zoy`=}Bjj&|d`8%1VYdsrCG3&Ho+RvjcE(29WZO_P$L#IP-(=>R-OW5R-|S)bGqF#7p3%(5$RyV9O zmzYbNdc@d9R0#71U;Y;J3iC?yD)Vad8uME7I&+zMy|7;*>@9@7rLeaW_DhAmwXnAl zc89P#x8N~vGH*6-!DHTL-j2r%9(mgpVOND+3k=b&3%e2gU^bnMv3=}aZLSe^*Xod} zb>{lw3z~v*)|nfqA2*_~u(~0badWk~ITRmw&Ddi;aDFPx)#kPf4s*4+BhUyhu}94( zeEI9l$IQpgox<)G_6T8byUyIzIxr8y-Yz%+O=IG0#i@JDzy*%^y!nFpqWO}rM+tkh zu(ucXnDyo>c+A(#*M&VcIEDma@6hyVoNc=Lj`^-HXN|dE*yGli2ZTMoX=1!BviS$* zhfR4&ws>!A^HcM1IN~#5PYjFu!u-;gzxp3)zc#-METd!QH|BBkgn82Z);uNb$->@I z*gFY(XJJnfc8{=k*<^l?$NbSejR%kUi?9bTQtfHNo*NJtNO751!2)9Ms}^;Sgizs#T|C-|?o=fqKS>Z=;3G|Zherh0Br=iD$^j+N)jUu#8J-GkA+ z0v`i%g*|8dw3!vP;n>zzfz>xGx1X>VglPs^gZ?$C)=+DtFMpji%!(XtjS%*}!ro8V z`>(@ejkd-Fr)q$(4{EwE(YCa`HPM=UVYOIA!agueTxyjC)>Oh6Yl>BUVHdY*gnh`l 
zP05<(%Ufqnw=T102>VcBA0h0c&*NKL_12t!Z_d_St3lX@3H$J-9Z9yt2@9!b-sV^{MMD$HUGh0Yh5SoV}!l1sY|jgaokPT z3Sa(a>t^c~>sISF>vqd;-C->k_Hn{KUf3rH`$S=%BTtvzFgSvY1)!(+oW!>9tbRu`-Q!9jkQ(S%bF@W+A{iWw;m3d zcL;m=8tV~ZuNXVLVp?4|A3Lq5d^zi^C#+r8lfqsp>{EojYMu2oiPp2iULEMk*Ytiz zTUB}BOv8Ff*lW&%P3u)_pD%xt^_um%^@jDP^_I2QdRy3Qg?*~9PZRd(!hV^s&k**R zo2+-N{noqI0n4%uw)sTZ>x8{t*cS==<-*=g*uA3pdrfzBvTaq5Sf5*8(9-(SI%<7o zeQh0Un%LPkuGM^DpDpaOgnePt&d#>{>~DQ#>#S4Och>j9K1bLaguQV;Ev+A|)0Q=P za8-3(!?c=d)pLY>uCR9tZm-ra)|sZ^DYo{_*IK_e4Nb9i=o5%mf+1bcgZ~aT=l!$D zgU7T~xtlpY^damEMDv3|LbvR;2aaJ)d9C{r>VqF!hCD6Zt(s1z*ixFgFKue;v1NPR z4!7pZ`QJaYaqDgakJ}RV#ljxEP_{1(p5b?S)p5`NynQWG7R;=;th#SwW%buoqYKl+#TGB?j(1zyQ8pQ zDePAX`_;mJjj&(4!KS-Y+#Z|mPW6=v`*p&8ec=B$HjU|G8{OL7%^f(;aOVj7vZjZ+ z*gCg%_jLCTSb7Qj4NYHlvBeE{_jeDxQ1tFW!hX}aqIVDV<*juOa}O8xn}z+h^Gmd~ zdz8EI-zDiD>mDcUw+Q>KP2*B+F_G@c?vnpRl)B4={dQsZH{F(Mi!OCnxvT%RnYpLB zr-xNsChRNDtEda8sCUm2_9kJ!Gpu5+d)|NgnD1U7>??(RRa2WZThzd%Zr^_bg!>9% zzf0Kf4t+cItFCOUt*t({^VEf&IS;t6abHVI_jT@NRJm_(-`F%e%{DCN-oQHDBJ3Lv zx^H&h;=a{=8!g?pyZyEi?i+0*-OHOkO|y;k-Y4uEg?+P+_pA#==U(Yv6(~9qg?)pt zuMU-*ujKrM-{&h^=U(kz<6bN5YlMBRu&+B`IoA1F3svF2>&pE=(7XO0bv5>X71qB5 zQ)L?w`0HNoe#}Q;Oq+#$(|IXR1lHa%VZS$2a!n`4Dl%;Gtw+>XRj+kF6F4CV zS=t0Fp}5`d=bB10Y{|{{P#^e^*z{0_O>h39`=zFrGi*uTCTMdHw7DPJ?0`1Apv{ZW z=4EK}2DEt#+Pn>I4nUiO(B?yE^F25$a3p{u5ga|h(F+^};OGmE{@@r0j=|s<3Xb96 z7zvKi;3x#gba2c7M;&+_v%oPI9P_}j5FAUu;RVMP;J6wb*MZ|ca6Ao;gW&iA9KVCp z1I}LHECc5vaNYsV2f(=%oV&pJ6gXc5=gZ)H6`Zew^G$HR2hR7w`4Koj#f$SZP}+jh z7nCwkmVvSYl$D@t0p$Tu9t7nfPhnvoet_;P%j7dT2NPkx*F6ipgs)hQ=skv^<_}s z1ocBuKL+(vP!EH84AkS`>H)4H;2H+55#SmHu1Vl30@pflZ3oxG;Cd8X-p9eU8(e!p zbAlELS~O@epv8gK88i=Q3qZRXv}-|I2HFjv-2vJP(7pui2hdJ~_A_W_KnL_@pcjH( z4tgc%RiM{^UJv?g&>sb_z6bOdK;H}cKG64regO1?pnm}R7oZ;n{cF&_0sRE%-+~bZ z#vm}}gRv2egJAp!##u0JU`k-x!E6cUrC_!PGa1ZIV5WkZ4rV4^%-&%30dpXjgTWjM z<|r`7fH@Y-N-!J2oCl^C%%U`+(860AC~W`Q*atOl^=f%OVl`@s4Xtixb^ z1=caJj)Qd)tkYoq4AvR2egn4++%3TE0e4q$j|TT-a2JEyTL$h5aMyu*7PyxNjNlgF zz7pJ5gZo-=F9Y`qaIXaSo#4J3-1mZeHMrM;`&Dp%2@#h+I53=xzKI_UfKz0 
zcO|sD7us!tc3Ys`R%o{k+U(C$5G_dc}y4B8!qcBdfH0g(noc7@2H5IGSd zCqrZrL>5EjG>E(mA{!ub9z-sH$VCvj1R}47$ZNe2xeOw2fXJI5@)?Lc22mD7^@pf3 zh?)UWbr3ZhqUJ)>JcwEVQ8z-=a)@ezsJkKRUWi%^Q4d1YLlE^SL_H2sPe9Z&5Vaek z_CVCz5cN4koq#CsuMqV+M4g3b8$>sU=oS#&3Zh#>v;oo45FG>2@ethsqO&172co+} zbUsA)gy`EK`c8=61kqa{dK*Mgc60f;#WF&{w8rx0@(Vvaz}afr1;tP^5Yh}9w1gxDmA?Fg}* zAvRF|v8fO{A7XtFdlSUo0^g|u0I{1Ob_>LAhuHTZ_6LYP193oH zGl-KR?h=U8A4T0Eiz1@uMNW5aR10egVWU zg7`HMe?P=;h4|ON8^0go4?z5P5dSO0{|*TWkl=xYR7j|YgaweW2ommsg!PcH5fb)5 z!t0RmCM3KC2_HeiCy?+dBpiZ-6OiyNB>V~q0nR@m;cw^wbZ7=0+CT>Do79=f&Brhag14-9G()EyZBP97DX*nb{ z1^lZZX$>U33`u(-={O{vhNPb%=~qbl9g;7B*bX`-K*z+UCwthg9sV73 z`T;tfhE6|2r!&y$H|X>SbovWAorTUe=-eDS%h34}=-d)IUkaVuKxZd(R-v;7osFGy zdfJ*d3q0qh+zBc7K+0-JSqCW_A!Q4sY=xAEAmw35c??pXfRv{o(R90=jgEE`6a(f9Ns=y3BwsbfMlfFQo2))Yl>PO}wQ31gU>P>RCvOhqM$(>jG&dkX8d}Qz7jZNLvYMcS72u zkoF9u?S`~NkoGmCeFNz&AYFxY4bsygy*s4mLwYTw&w}(hkiG)a?}PL;kp3d1zvYGW zw;}yk$gn|1bI6E+jE<1e88QYz#%RbWgp3BrSPB_l$XEdx_d&)Q$an@aUWSZUA>(_< z_zg1tfXsNvOo7ZUkU0c03n6nHWHv(P<&f!vOz$Sh+zy!!L*`qMc@Q!`fXrVY^DJc9 zAgeuOB|}yx$Qle;V<2lRWX*xB#gMfWvhIbfO^~$(vR;6!Hz8{;Wc>nJXQ8VNy2eA- z6zJLox{iRZ6TsVb5_G*1y50y~Z-%bxq3eUt^&#kb2)cd^UB7|sOCei_Y!k9`AiDsv z`$BdxWLHCWEo9#c*{dM?F35favY&?RXCeDN$UX$wpF#HT(5*RilcAdjFWs`CTMl#^ z4c#U~w_@nF6uMml-L8XfTcO)y&}}Dl`vAHffo@+wx4$6A4mm9#CjoLikdq2Ig^*JW zIi--Z2y(83oU0*cBjh{;IXfWdL&*6Ya=!FJ?j?|`K&}gNGaxq)a(h7TWsutdx$_|R zPRLycxf>w&CCJ?ix%(jZTgd$da({*H9ie*$bkBnBlcD<*=w1!oS3vjsp!*u={t9&8 z2i^BW_n)D=_ixAp@;X9Z2IOTyUMb|&Lf$mUy9)Adg1lQG?{Uc64S9PY?>OY0hP?e*yU?A^#M3d+5+33VO7M9tF^2 z2=o{RJ#K&=cR-I7(BnPmaR_>R20dFrPYrq+(6bx#>ryqJQ zhn^2X&nKYglhE@B^gIqdPeQLY(96V2FE{k+4ZQ|Kuc6SZ4tmXpUJIeu-Oy_T^x6cy zUWH!oK(BY9*BR&y^lk>d2SD$U(0erWz8iXPfZm&+_c7@G1N1%(1-+nP5EKl7g6pBc z4+YDi;0rGloP>f?(5E}}=?i`OLmxl%xf}Z23w^$UK0iXApP+9i=$i?ByF%Xv=(`m9 zdZF(g==(bKeG~d!0{s-|=YoFYpn0ktq-8Vpzi1MY_bTVcRa7;p*(d=CTD zU|=o`%!7fAFz|91=!1bT!@##;;5#tL1>QmJU{Dkcnh1j`V9*p8v;zh`34@-7!OdXs zr7*Y+3@(Jh#W1)O25*JIkHO%bF!(eK{u2iO4MV!Yklrw)4-9F9A(z7t9}Ia4hP((v 
zUWTD9VWUbGC42F(_p%Y-}oiKDA4BY@jkHXMXF!XyEmIlLeVOSmvn+?Mj!LTJT z>?Ihs7l!SF;cgfn3&Z1KxDSRegW)&8@cl6SV;KG^jOYL(y1O~kW!)OPLR$z1?j4p=Jr7(IMjNS>OcflC14aT&BF|A?DXc#jY#uUSthhfZ9Fy{GBlVZWw0qpm-D%PlDniC@zKK3Miff#f?xr zABq=2@lq&$4T|?e@nI-F0>#Im_yiPx3&r0<@fj%o9g6>g62M#10!lnk(hW)qp`;i} zN};3zN~)k_Hk8bT5+9UY0VP*K$u&@N9h6)TB{xFJDk!-dO74S_HBhn+N;W{rCMeks zB@aW%D^T(}l)MEc`=I1qC^?9ik`JNe6DTG>0KS9~gQ1%OyH-~Z=$`vSgLAeg)CX~COyd9J$LU~6h zPl57OD9?cMu25bLN)${YjZpm*RG)yFOQ1%F8WUm&FwgPIWL+yO1 zT?n;4sJ#MeuZG&|p!OE1y&W&L%b|89)ZPcRFGB5SPYS z)q<&QVQLgije)80Ff|dTc7&Vv79VCq{i^)yU# z>o6@Arp3dw1eoT5X{kGpkF>qp%)1X}ybCi9!i*1L#wRf2FwFQIW*mhX$6&?@m~jeb z`~Wk4f*EIECNQ%Z%uIlp9+;U5GY7)VQ8051%$xx;8)4>rn0XJ(+z2x_!_1dq=G!py z9hmtocxV0sGk=9T9qOW>u07NZgt}2sHwNnFLfsOmyBzB7hPn+D*FgP?Q2!RxzYX%-RXFo`YG>!>l7P>p09h39~PS**eTNVRjbG?g6uV!R+ZUdoIjw zgxRZL_F9;|9%jD+v-iR5{V@9vm?L>%jveO2!<-bD(*@=Xg*jtk&UlzJALjUA&J{3c z70g)+bJoM0=U~ojFy{@Ja~kIS33L92xrs1073QYH+~F{HJj|U4bG9MmsbnLSrg4rbA;9G*&@l4Kyx+#;d{Gcr7$O4vo8^aSt?p0gWf2 z@f6HcU|s~wYX|ePU|tWH*9+!Ng?Y1K-dvb>1I)Vv=BPjf0Slgl1y93*!?55OEI1AeTfssN z78+hy*cTQKgM}kt;T%}F7#1#th4;Y1jj(VtEPM$T?uCW>V37?LwSq;hVNnJw%7aBc zU{M_`nh%Q>!lL_N(Pmh5KP-9+79E5|AHZT8EN%shTf^dPSnTZuiwj_JF)Xfz#kH{b zW?0+=i&w$o=V9?1u=p)l{3|T6!II{%Bp#Nez>+SoWFjo7fF)C4$qoP4(tQVaL1lfw z{~Vf-0!T?H5~(31)DU#-U02t>y6U=XTXpSQTy-t*Q?Mc+2m;bvAA0X7#eyOzHhOQ; zAqgdcgfw`E_kHioopa~3IdlKG=icu*Gndzf@Y+ybTg+?Acx^ea74X_gUMr<%Q+l?h zXIm0I|3=Se==mHyr_*x*JwK=CZhGd^vyfi(=+%T?&FFP0y{@Cz4fJ}1UQg5OS$e%g zuYvUXkX{Sv^$oq2(Q6;Qj?n8Ei6$gkoyEOLTuWBoZXf7RDx%m`dVT z5}QbD;q?q&Z_n!;c>O=T-jml8y#6(>|G?`%@D_?d&FS5O-cKj!{U3V2NbjZeUPB)HRi1h-fGEPf90*mc`NY*Z%yE>nY=Zd zw~BeIlDAIt_659s1#e%)+b{EWZ{B{Bx4+=+<-Gkp@6_gLwyL)(dAMfSxULNmVz+~|fxh+V+l0Q&=z9x&|3u$=={uOdqv$(^zQ57;FntSnzb)@~;{7hX|8L%Z zmG^r}c>fFDU(WmA)2}xD&ZS>N`rSysJLz{9{YKMo0{te@?->2c=vP7id+Gl${r^V) zDfFL9|M~Q(aT9={q z8Tuqc|IN_DUQjx(&BVU-NOgyGjR z{CbA>X88LI@6Yg`8NPwxoA~%FH1lx_K0cq1AK>G^^YN2>Je7~<@$mviU_>28Bs1b3 zMm)rbM;I}d5z`nkgAs=rQOt;wj7(!>8%DNcWF{lqGcu2n7clZFMqbOv8yI;LBX3DC 
z@^(hv&&UTD`7k3NW#r!(`4l5ZF!DP_o?uiPM*Wdd|6tT>jOxXxHyHIMquyauUq+2# z)I>&2X4DKu&1Td*MlEI3w~YFpQ9m;37e=jRRAK|8HZ$q~qskdo!RQ){K9|uA8Qp}@ z%^97+=r)YbWON5c=Q8?MM&HBe`xyNfMnA~te=_=cM!&%5|1$bjM!(7Ew;BBdqt`Hc zC!>F3^g$A%^BH}F(Z?BmiqU5SIL(+~Ol`*0V@w`nZfDG&8S^*BJjR$O81pn^{>zwG z81oKe`ZA_JV+Jy2Fk^-?=3~Z8V9aF3Ok>Op#>`^O9LCIZzA;-EQ^F@L_~aTs`5T|S z%O`{QWbkLp|9bY8oX6M;8GA8fFJtVLjJ<}j*E9A;#@@`>+ZcN%WAA3{y^Ot|v5zwL zamK#O*g=dP%-E@noyXV(j9tap&5Yg3xL{lo<5C!x&A9H2>%q9Q`Dxsr8TS{){g-iX zFs={drZR3G;}$S(Bja{4Za3pGz7FG)8Q+8PmoxrK#`j@-KgJJW{36DG%lPjYU(ST; zOsL6(EGBegLU$(I&xA*r@VGW6^ku?eCVa$%WlZ>)3BNL-gb8Pu7{dlU9L*E|VB&w7 z_zDw;Gx1XOB5@|bi1lOAN!6HI!FN$)UeAd^01(o80O z&ZI?5+RCK8Oxn++Dkj%ra&0EJWpbypiMtDv?_u&oOn!vPeVII%$saL!Hj@`Ic?pyE zFu8!qN0?HFDGiv?h$*>Dxr8Z~G39=yJj#^EnerA>1~6qHQ)V#bGo~zL$`+>VVah(H zoRKiKHdB+Bn#C)57Jw4O{$Fl{)~K4scirv1dU^-SA%R&bPQr}w-N?}$9X5}&~k6Di~>p5nfed*5{%B)YA^(nK~Fl#Hb zwlljrvs0K|m)YHzeKE5yW%gstevaAyWcCPVk7f3FW`E7>ADI0kvx}Hr!R#vLWHP4< zbGk9-FU)z2Ie%x)JIomb`;~dC znU~MJ6U-}NehcPjGCzy?H!}ZD=HJEq*O>nn^WS0q6z0!m{(RMSHt;g1YaHGtK)oC z%-3D``geSNF<(E=*RLh`x))#1=Icd#y@anzS&F6ASb7~xZ)NH2EPa=ygIGG4r5jkf zlcm4$O)I|1=9`Xu^9f`Dp_`ZQ|$V{M?qGGx_-+{QO^jeubaE;OFK1{C$F7lKG_(zohZY-Td+ZzdXb* zpYY2hewo5A`}yT4zZ~b+Zv1*Nzh26(|K-;=__Ytee#5Un^6Ss6s>iA(tZK%pM_KhO ztNy{N#jIM!s^zRcht)UMXLSQs-^%KHSbZO>Utx70R=>sScUk>DtB0_9D68kN`YTp1 zW%W0#{+89NSiNRg+EdZ@rZr<(Gl4aeSu>3_Gg&i-HS<~XIcpZP=1bNrWz90ytYFPb z*8I$xRjgUdnhmVkJgn%cc~*O)_%|0A6fegYge;&9cwqTb_;8_vvwD23t4-VwMDEw!P+X;o?+d^th<54x*J({ zGwW_;-94W%=CW=H>%L_DxvbA%eM{E2VSPK+cV>N8)<41e7g+xi z>tA90Ypj2Z_3yBLDeHe>{VLY4W&L{A?~t&57aPuFLkBkGu%R;>y0YOSHeAAn=h^T& z8+x{(}irhmQB~Q=|(pFflY(i^glNJADhOr zX(F4>>Q=JpG@FCX)!CfF=DKXYlg$sZ`C&Fc%I3$}oS4Dp#cck9&0n+m8@42|C7mrz z*>WCRGT73dEgjhM99v#xOHa1E&X(S6d7mx)*|Lr;zp-TxTlTZ%AX|>JrI@YV*?J{g zuV(9YY`uZ4x0Bdrl21XX{9|j%MowwoYQ}5w=#awTf-nR*h}RY)fU^wQReC zZGU9jpV)RU+a6}y-`F;hZS&Z+fNcxewwP_p*tVQ)6>P7~_9O}0Q`ugR?M>L;jO`Dy z{Ta4D$M%1*{du;(#`a!ppU?Jh*uISIE7<-6+t;vt9XndEBbyx^+0lg^-Pmz4J1%8M 
zf*t+XF@PO|*fE$Lqu4R#tl+HiZ|vB^j{WR7$d2ReC}w9CJA1J6LUvxv&P&;O9XoGe zXHRzaWoJKje!$K_>>R<)QS4mH&Ry)>&CY%7JiyLl>?~qeHoGol*G23~T*|J?*>wZE zZam9iS3h#Vpl%9POz(l-)`r(zw+CI{PqaHJ<4y- z^4mZ7t&-h!*qzMoy6mpc?xyT+PGa{T+5Hf^A7S@n?EX8u|H^aR|?5!?g zZ%y{rWp91<-p<~?viCvuKEmEd+50Se|H0nz?485jdF=g+y$jj<4SScduMzuNv9AsL zGTE2KzHaR6&b}Ae*PDHBvhN-Cy~n-}**Ap!-4pD;lKoe+|2p>H!2a9We<%C1M@lXIR_SVU>OHia9|||e&)a`4&-yN0S8-h@b?_NgM$xn@Iekf!NC_e z_!0+S=U^WWzRkh+IM|PaA8~Lv2S;*n3 zi#YTJhrZ^}w;Wo|pUa^6New<@FN_4jKfcG_-PKm&Ee@BUd-XMjcA7pIDC@B zRpcj;pF)0P@|%#~ocs*(Talklekby~lHZ;D3&_8M{NIy*H~IIF{}B0qBmZ&opCtbo z@?RkTzvRD4elPOhAipp9LzCVvX~)5xDs{^#T`CjU$Fmy*Ad{GZ8RMgCgyH;})R z{Db5lC;v1BD5yh03I+8jXh1<@3Nk2YML}B%+Eb8CL3awSq~InB&NiYJ+(*G96g)=3 z6BIm6!E+S6M8PW*^rYZ*3i?pckAjaV_=JLK6wILDGYS?_@C5~5Q}8VXKT+^21#2i+ zPr)V%exu+p1*H^}QCN+_ni2|YQanE21k=Pn$FRt9Bs|fL_3aVaWsdcojH0TM=$2+WgNYdqt|fsW{%#?(MLG? zC`X^==s!97JV#&T=*t}K&C$0w`YuP`=jZ^APUh$;j_%-CEsoXW*m)ewVY1IKRS*dICeCyw36vA=NaL5{t`u}?WRlVck>wu@s094p~?b&l8McwLU4 z%kf4WPv>|ujCdWH)yd%fEaQr5Y|BK@Z3CAaLd=|$SaeNuaS95$V$G35OC&zbl zd>_XTa{M^QPjI}H6kR~kMHF31(G?V3P0=4Hx|O0kD7uTH zdnkG+LD7>G{hOk*>o+LsOHqG{22wPbqM;OhLec+GG@hbK6iuaQ9z{zj`jMhd6m6ks z4@LVaIz&+cMMo(*MNtJsrzsAKYfzj@aT>*m^C|8~ac7Dzr1)ZrFQfQMim##gW{Pj4 z_)d!NrubfpAEx*zil3+Wb&7ja{6574C>})d5Q>LU{6C7vQapj;$rMkccs|8nQv5x| z>q!)Eq<9y_dnn#d@ga%}C_YK?DT*s7KFx{XL=q>`IB`BFI&z{jCobf~#hkc|6IXKL z8cy8IiQ71FCnxUa#J!w&o)aTDF^LnaII)=%2PB**;=~zF1}Bp^naaufoNUO+G)|t+ z$<~}~$H^>C=5X?QPCm=YS2;PBlhZi4kdxnXaup}naB?drcX0AIPVVL80Zty{WHBd8 zI9bNY$^<1fDLI#t3`$y3(t(nWlyspamy#ZoTu#YVlw3>64V2tO$sZ|skdh}Td4ZCb zD0!WdK9syo$$OOaqvRt>hEp<BqN6AJ?wotO2l3kP> zqNIS5qm&d;a*~qMlqONykkXcvwx%?P($182qco4w3n{&l(rYNap3)mBy_wRxDSd#_ zrzw4wMCpr^zD((BlqM+cP3ilT4xn@pr9&tkM(O`3olNN*O249XDWxkZ{h88Ll&+<8 z1Eo7D-A(B}N)J++Pw5Fx;Zz+?rE#hWr&@8Ut@ci}=TtVQI&ta(PF=*QOF4A~r>^GI zA2@aIusdIgE6#4#sUJA?Bd6AJY9pt%aB4fJc5&(urwTZAlv72VI?1WilqFM^Mp+xm z+ELb-vTl^+QFb9^7gKf(W!F=7BV{*Jb{l2)QuZii&r$X&iL#!Qy-nGBl=Y+R1Ij+6 zY$Ro4DEpMMagu(dT9ntP 
zycy-~Depjeu7vU)l>d(MODMmb@*60>iSk=0zn$_wQvMgppQQXhl)piFAIket{sHA5 zQvMO;!zuri@^O?;q~GQtss*^J8eRCb^;m&zVg{*KB^sJxuY8>l?nwOM%!mA6y*M=Jk9<>OTTlggf) zJxVIyrSg3$2T(bP${|#art*JOj-_$}m6NHQL*=ChF#32y{qFQZH?Oai< zp{SN7sx=kWvPHE{qFPr`t-Gk!Lsa{>sP?L;_O7VbS5)gSstpv?28(J#MYTDi+G0`d z3sLQBiKzCisJ2d2+bF7S5!JSfYCA>sr$zPui0W^M>U~7@|B32jMfC}y`eadkny5Zg zRG%ZN&llA{7u6Sw>R*cLOGWi%qWTI^eWj>=KvX|0suzmt$3*pFQN2V|FH4B(m7@9? zQKOouQB%~YEovl-8g)gDb485|QKO}((MHs0Cu;mz)cCuo@uaBnjHvOPs4-U5m?dh= z5jEzE8lQ<81)@fos8JzmoE9~Snmt6#t3}OgMa>&T&BTqOW-n3meNnT&s5wy7{7}?f zA!@D@H8+Twn?=p7qE>xTtA(g_zNpn&)M_hg-6v{2E^0j?YCSD#Ju7PcpQtrc)S4}7 z%@eg2h+6rg)+te|T-2%(wa$oh@O^s)ILwt&JeX*iP~*N?e?N}wy1r*sC}ELeTS%hr>Om)sQs|0 z{ivw@cTxK(QTtg@`#p)M{h_EmMARN8YL5`LXNub2h}u7i+P{d}zlz#xMD6vW_9jt# zpQv3X>ZFJ|^+cU>MV)4%PNt}nBkEiz>Rc)6+$ZWhBU<#Tj1qOmh&rE&I^#s0 ziK5Pwgs3xD)R`~pd@kxN7IjvMI%`CowW7`jQD?KLvrW`FBCYnRFCytd zk@Sd2dQ2oeA(H+jlAaeyFN&m>MbZF~G*BcB5=lcu(lC)ULL^NTNmE49G?BDWBrO(6 zUy7uqB59dOS|O5FiKI0mX}w6=B$5h6(ovB}Iwq2eMN)}KDig`IL~ zxrIn>DU#cW4@uDUC%+x=3jzQd)?VmLjE%Na-q4az#oHk@7o{a*0T}JRwqU z5Ggl`l$%A$Z6f7Sk@C1m`MXGYN~An1QvNAYdWw|SMM`gx@_|ShBvOWmlwl%egh&}J zQWlDofc4`QzG?wk@|v2{jW%U zRiq9SsUM2e!6J32Nc~u(juNSpMIv>oNSz^4XN%N%BK0$o`khGqRithcsXIjK9+A3V zq#hEfM?~szk$OVZtu5-d6m_o?b#E4R?-Ob@ZA{#Vp} zRn(m!>i!_=?v{wU`J!&2s9PfHmWjHRqV5?{ubQY=Pt-eC)N3N@H5c_VM7>s`UY4kr zBkFY)^}30Ae-`!L74_zcdS8foUx|9(iF#{9y>+7AR#9(?h7&}?nWEumqT$z~;kTmUPom*k(QuP!m@gU@iiRbk;VIFuLNu%rjgmy8 z6w#=;Xp|uuwGxfmibm~4qioTrlW5dcBpO{I8eJ_KT_+m-UNriHXmqP+bcbkkmuU2` zX!JMH=yB2L3DM|ZqS5oB(G=0>Gtp?FXtYE$`cgEmAsU}68aEV;(?sJYqVZj#@x!9= z-$dibMdK$#<4L0Ne9`zbiDWR(KJNUB+)cQG`(6hy-hT|Lo~fhH2srk z`k-j~uxL75G#xLRPLznIQ$*8gqUn6m^fS@4P&6$UO)EvyGoo3DW=W!1ifDF~Xm+b; zcDrcyN73wV(d+@y>><%?sAx7;G#f9PO%lzfh-Py|v-zS~sc2q9G_NI^*AdN=Me|0Y zd0Ij=zgaZDPc*+@G=D}k|A%P)Z_)e((fq%n`KzLNFVXxB(fm!({3FqPxM)67G#?|H ze=3@f70oAz=95J8siOIE(R{sVUM9|KEY7=DocFjmueUhwTXEiYao&D$UgDrQFJGK@ zM4VS4&Z`nF>WUWUiWZGTi*(VVnP|~Mv}h?>v=J>bMT-kXi;F~yOGJyyMT@IMi)%%T 
z8$^p6MT?t7i^oKZ|B4oGiWc)kixr|piO8rUGSWpxOOeRvBr>{)j9ih?LuC9;WLzRL zt`!;Ai;O>rj9W#<9U|iak@1kocuZtGAu^s88UGX+&x?!~MMgqo^b;8aM8;r|F;rxH zEHcK3j88?zIFT`3WGoUHOGL)c5|Ob=WULh#8$`xtk+Dr=>=YThMaDjnaW>*Ikx?l! z&WQ7?iSuiU^J|Oqlg0UU#rd7Z`FDu(9~0-lCC(ow&i_H2Um#j0iI(Z2WmC~IOSH@p zEjx>r-9*d0glKu8XnC<{d6{T=vuJs%XnBWdd6#JUC(-g=(ei%L@&VEEP0@0KXt_|d z+%8&{i&hz;)perPt)kWKqSYOu)t^PHzlc_U6|J5Ut)3CBUKOo+iB@ljR&R<{?}%23 zzM@rs(Q2S*^@(Wpsc1D$w3;YdO%bi8i&nEltGS}pX3?rjv~De0Un^QaCt42>t>=r@ zUx?PrMeDVq^#;*;r)Ygxv_2|Ymy6bwqD@WFrnYEvu4vOvwCN}kZLSn;{vg^sC)&I! z+Vm7{28uR=MVq0b&Bvn6DADE<(dPd|oAIK}B++K7X!E6LvsAQMCfckJZB~jlKZ`c2 zM4Ppu%?8nCvuJZdw5=}Mri->YqV2_^?VY0S;}X%fw`e;|w4EW^ekR&35^WcYw#!7@ z)uQbt(RPPudqA{3EZXLawr3rdh_;(%pD@LP-GqxnZ+WrL}ZqU%u11YMzpUc+Mg@hUm)85UbKHuB--~9?f*}-UoF}n z7VS$#`!dnKQnWuKvZ{%!nj$M*WVIDpnIfx$$m%Gvx`?b?k#(uax>02PL1f)3vhEOB zcZsZfMAic$>uHhotjPMO$a-F6y(qF?7FoR|BCC(c8Yr?p6j>jMtl=VSq{td0vZjix z=^|^E$eJs%7Kp5cB5S$G`d(zM7Fp{=)<%)FMPzLkS^GuSA(2%evW|+ZB9T=gI@A;$ z8i)>!M2DuL!+D~^`3cdXt?1BRbjTJRx`_^#iw;+c4%dhd*NYA}iVio64)=);_lpjH z5gq<2Iy@yhJR>?hFFL#^I=n18ye2yI5gpzZ9o`ci`iTy6M2D>+o5)TT+4V$r1CiZW zWG9-4?B*gnOJrw@>>QEZQDk2uvTqmJcZ%%0MfSZS`+kxAfXM#4$bM2}|5Ic?FS1`0 z*)NOiw?+25BD=rH9w@R0i|pYdd!)!7BeG|U>_sAbg~?@B~YUy;*a z=eX$DNOa5+9lMK;SBj2Ti;mZcj=vWj{~$WvDmuO>I`$DA-x3|)6&>Ff9S4YxgG9%n zqT^W6alGg_O>~?oI?hRmj`KywC8Fb3qT^E0ahd3NOmwO(I<*v?I*3j=qElzlshj9@ zx#)DI=yaRtbf@Tax9D`Q=ybp6^nmE}u;}!t==8kk^rGnWvgq`h=#&thdW%kPiB9i| zP9L4+%@UoKh)%0SrvlMANpwyVopVL!8${bmyG7S~Mc0Q#*GEOyzl*L78+=oQ&lOp$Nk^4`P`@G0~QRMa!xo?Twz9P53$Q>whKN7ja zMeazEJ6Yt;5xI%4MD9|NyIkb{AaZ{axvNF)I+43kN|9S7x+jV5 zDWZEl(Y=A_-dJ>RBD%K{-P?%nS)zN6=-yd$zeIGuOmx3mbiYn?|Gnsbi|Bs4NOb?B z=>AvH{c+L#dC~m^(ft+Cy{G8@y6FCv=>D$g{=VowT6CWvx_>6RuMyn~L|%xz8X~Wj z$g3mrQbb-mk@tI%cbCY!U*!E=mwAy~tZF^45vGjUsQC$lD|G_KUn@BCks1oe@2% zi5@jYj||bHrRdRG^k^q~WQiW#M33%wi5?e-9v6uoSBoCch#vnEJw}Qi<3x{@qQ|eI j$4PO)IpTs8aY2Ls_m2QIs{Mbo$@2e?{{JH`X!QR8SE4z> delta 35097 zcmaHx2Y6J)_x?W%_bz?+_PZg4goFeFA&rm#p@iOB=t)Q+1rP{?gce|T0K15|;@Ty2 
zMaAAluy+NeS+FA_VnI<9<@fBa67~P(PssD^oO9lH-kCXPcJ7_L@u2gq9nR~lrV~69 z1G5!JqO?nOZg^n8fM(tfu1>Blt~6JsE8Er6)yI|R8sr-48tEG2%6Cn2O>-5wN?qly znXXx`xvqJx8rMSCV%HMaa@Pu%aIJA&;kw$j&UL-(M%T@*+g%%7TU^^+J6-p=9&|n8 zdcyUzYme)B*UPTgTyMGdxemAvyFPS%?E2hw%yq)`t?LKZPp)6RuHRkf-A&w1cMEqL zx8gS3wmZ@t<8JTn;O^w^;!bmCy0hIq-F@77?m_OM?vd^>?tJ$o_cV8ryVPCop6R~C zUG1)MuXKCeKDThMa<6u;abN1b%zeGv@4msk!F{7U;J(RyvwN$1n|r%^hx=joBko7t zkGY?BzujazuGw`9S$lIj(%IoKU_|eo=l^&MLpDPPLiZTy3G6 zs-@bhM@>*WsEKNlnyGeEv(!AbzdAr2sE$>~spHjrb%Hukournj-Wh7STA?mbYt)5m zt?E;Sx=LNGu2Zj5*Q?j7cc^!&8`TE&UiCh8mwLbYfcl{NkovH?N8PJFr#`Q~t-hn~ zQ}?S!)sNLr)KArK)l=$s>i6n7^$+z=^}NTv}T#Qj5}}wN6^H)>-SKbWZJ}1H)oDVzRJ%gEMq8)(wHvitwA;1K+7@kx zcAvIOdsur?drEs&dqI0qdr5mkdsEw|9n=nKhqa^H$J*!GaqVmEgznT^>aBFQuIPqt z>JfTdJw|V*$Lk4tN4=AtqIcE38G5Fkt>@^udT%{X@2?Nlhv_5r@p`^KSufVh^_luy zeWAWsU!nW-%k-=C>vX?LALqu;MTsz0GWt3R*5s=uZ0*AMC+>YwPx^ppCJ z`f2^Fe%`>4jFv_lLpKb=G@^|dqn#0J#2KB9E=G#c)oWxKeT{xbp3&bJU<@<{8DosG z#yDfVQD_ty(~VMNfl*^DG-{1HW0A4USYfO*yvAx{gK@KQo3YVoFt!@ojJu7U#xCQ2 z<6+|w;~8VO@vO1O*lWCMyk@*^ykWd)954^V!m&FV18(R`XYYn^qSwchdutedPmtUIl();8-N>t5?V>tX9j>m}=D z>lN!AYoB$<`oQ|qI%a)k9k)(dKU$})v(|aLncduOVXL-g>vp6aWk=iTc7~m4ceAtX z?sm4FWB0K8+WqXo_7HoVJ>Jf@C)iW$+4d#&9DA-k&#t!T+Y9U(d%4$MVXw9?x39LZ zwb$F%+c(;`*mu}>+dJ)h?0fC|>|ORl_QUpb_Ve}&_KWsQ_RIF0_FMLQ_5u3?`xE=P z{k46<{?R^d|78Da|LJM!Y2|6{Y2z_HmdEzA^K|m0da^t@o?f0lp1z&|o}r$Rp0N`X z=GD!a^DZrDNh|-BHr*T#`;WA_*%~M02|}JKBIJ!iYAK|a4_)D!;E=u%a_fga zaeuehdD@q|OG)=1*jMK7z2Dzrmy+J*r#3&gIphCuf1y`MO@-82NQRI+LW&Ynf{;22 zsf&=(gw#z)*+S|mq~1arD5N1m8Y861LMj&03?Wqs=@KC=6jHsAyh2(nq|1eLwUE{c z>3SjED5Tqk)F7m-Lb^*x_X_C&Aw40aXN0s@NG}TMRUy48(xrVuIv}JYLi$ihp9tv- zA$=7bXbGj1xsoXNwG~Y_`PfejDyedH_W$wj5Kq^zeM_S~!xvGk4Dj!Luc!aU_jElg z9I&l1pojnI_lou0uzg3Py`TTOgX#X_1G+vS?7O?sH^hJRz)*d7*nUr=eYF3&g9G(( zVf%fJ_KAV%Z5=@}r~my!tvpl00rxis6b4mh`^86H{Pl-Sy*M23U}L}xf9Z#P{fURv z=9OXJ!~Q)-GW_ihw{Lz)*zjn`P=C06^Z8-J;~~SL!|j{bg$+;o_Z&(0Z#WX~SmwXu za1Z~FN7{S5Vb|`)o>m8?^bby0;H>FTBV3n-L-sU=Tonu%=+BEu^Y3{-Mqe8ac&;&E 
zz5j>zN9s3(?JqRiZwf3h9Q`fVtzpkgjh;Jw5rN%?V~pY26!yK+=-U=VrTcR~)I4{E zov$@I?+t1h?9Yqo<2R3L`u*X6HyQ&T_E#J6{;_eYf8NnZ{jsqBtw#S-{vVEJ_|JUk z_UsP(-f8qbcVRC4lRwh*7sCPj8v|YqNef)-aj1stjj;Q@M)y1Z(htY#?}qIM8|{a} z`LO6?#c;hJ_8xBZ9u0fnJgVxShMn&>I=}S)a5PUp9=3njXg}#M{ji(A-;pT&yRh$Q zqwjQh0$=}FHC$)H-cK66zlFLB^onpOhU<^8`?Cvfx5IziCxbjv*#1SMy?JnMCj0lk zH^86!X^h?~9B`~L!0n&=>CL(pwjXb_TcN@l{+X^vgncI(ebN5;pH1+@hV3UC?Fqro z!u8Sob5sAT{7*FoboQrxK286t{J(FsXZUabeEQ#2|6`-QhyRE7aXwnWJ)AexuOEmgshcbN6=TZLNUn%)u0(`#B-S38{Su6CWDWmnYh%zTw_? z(Z2S(-zCL;Ku8^glo-M#`NcOKyzUR&pIj95srxfh++PT(laP`_pw97mj~1qEau^L$ z>eIT!W~XJWm|CCJBO~~6#b00ESnd<!|RwOr0yY%?mv7oD#ubHFN%p$ zqQjURA@vAj;`1Jz+-Ao7YiIdCJ*lTAD4i|}PF6aH!M%i(`&WnazKu%FP_liwTa-+t zo06q;7g8S~^%YV-A?0mRa+DrnOn)H__zM$E(%(+C%^9c+yJ%m-l@VdwAR!IDAav4K zU%5V684@~PnH=miUzwmxR3-^&sE~#UX}FL^Y*D5tQ%O+@gfvn}qe9@({)XfA{ubXS zrB*1jgSbkiN|~w564F>9jT6#%A?0sTE>Y%?qRbQ01R+ff;UNCs5FqG zY!&iPp;2X-D%3I3YLaG%~ zosbp@Y4H|ipR%76a%k)|Ft-3D^QdC1oSBU1)l_B1%{L<+T-nMGH zi)x8g<495Cg>;RO)`l?G#^*gc?bNz<6Gn!1tIUk-6;tXn({uhUq?6k9qMWH}TDZ!t z6ViGi<+l`4a6g#z&8a@WKUY|vnUkFvOu+8hjhmmEt>#{oueaKV6t$m_{6e}Rq;Z3P z_?eEWgVhnf+%4)5b*MT_9WJDRkZuyv%|g0mi#k#r71nU8kZucMf?r4V{%)tD($pzx z(M3C(t`?J`mI~<(A>A1QZuI|rrejW(I_IL8x#~Pp)cHc%B&5wD%$DGSUQ(TT>|y`L zU!qbMsr45HFIQKPqI!k2O-S2A;2r*Ne(9Kcsd`lq^S|m)y;{A76!lsm-7Tb@AW-b#vkyO8b^(ykEZe*bO1CZ}#zcLZl~i@H_arfwI~gF<>pNDm9? 
zkuB<7>fNNM_Xz1xAw3qtJRaOwDfNBLM&kE^DAw3l$f7(ChY{wqzi|T6^9mVVF8>Faj32C>Go(+Na1gE;} z-35oL{P&-YPE+4gk6g6N_tg(bQ9lyWb3%GPgnq&Q$JxX*^>g+3ML}PyCrD9G3h5;w zy&M9);?MsrIZgdh{pF%Me^t+tqW&(V*M#(X2=s>k-rqXpI5g+QFj_OMIVoC8A-yG} zw?mkBLTNPrnfUqsmcK`(Dw=sQm71m5q-YUB+ApMcL*V!P6Ms({*iK8hXonrNL{hYl zLOLjp z^B(!C>c>a+oeAxT85vps;F_t;y{O80S~V%!0wEn2($^uX6G4@;ej0xJ%OR@nJ<@}l zYv#W+I#r-byqkbc~vt`l$AJko_?s{obHGA*6Fb_Kwd4*>?-+k4E-A+D`2`5`}a=#QmqQ=-=b)8+gUx z&@}B8?X@7`t3q}(Xs-*oNszF^+d;y2gxs`|aKE-wdyhmR%Rxd}@)h}A=X(cgngm(i z*FFrgd?4gz4cbRSZXRO!G|2LqkXtmee4*{sjs;m-g;-kpiu`;27#p0!(O&JOcGj1B zkM^y0O8ZXxUi(4&Q9G^ur2VX&(SFf>6>=LPyM?R@Sr@V?WLwA)LXH%2w2<2gIZnv& z_h`Rqzia2TKeRu!^E$diZ=yHVC0!PB2O)P9a;lIsg`6$qUPA6GBIKkEsv|yDAK@$Bcp)$K(fZh6&=?_iYS70CIXM{A zPM_#2ZqO$Qx$^-|=u`Bm!Q`aey+$w8i}W^uUCkZsjV?m&DrDtdTIeNusb1z7qt6I@ z+1$}7BSmqBGF7k8D+@;F51UtBv1Dv%?aWPj71jSVgrjEZvjhEFI3k-~qR$CTZ{g_W zt=1Pbs;UukTJS|?D~{=PL0yZ4oc>>3%k&~$^U++$86jEPKV^A^oT4;q9P}!EP2+k9ZsGc-BKiFzid{}?KaY8;2^5DM(9}NorSja>ED>yhIo&PZ*QH^BB_2d(c z!w(itqwkddebDutkcVHWsi^La{-5<<8io8D`n&!|&~;A8qZ;%-g*-Y~X;X|QzTyU> zsgTF~N1@S7FVg#l6pjr|X~I92%xE24vX&S6HQa{k%WW_eA&+k`G$H2)9&6)hA88wH zeYu+qj}al{i9(*V$%r(fggjZuQvyG=ag?=>H+(rf2Mz0`Kj?u&DDdb`ymk7CZ zvyp4`Hu?y;Ovp2YTpmbwJMQQ(#2E3vLPr{-jL|}_6mpf2X9j+9J7V)D7*qaN=u~5x zQ6S{mLcT=EbK?7cPw3waVmmY;_OGNeN`yS{v5AUfc9Yy0#%y2iHY3NVFe;6n#!O>Y zNaH*qR|~mR$n%A~V4HD?!!YI=^Bji55OR%gx{wzNxi0X#=D63pm=?woV`(r?YiG@^ z2pt8#8voEtA=ieI)u=a?hx4*xlTlA~v7%0>E-zggTUr~Np3yTcJ9g|SgsNvpn#$ z;TYG-*c@)I^x^;Hz@S#fj&R%O!>@c1=N}tq2R51x)n(ipcC7O8&#MrZ@nG1#CTI`c zDFwz`4lT-f%y`n5v&neectXgR3HkC(##6@ALcT)CR|b5Rqhpluyzx>n;sxVHAzv-z zYc?4#8?Oj?t&p$%E8=Zqe=y>mV8nHyhx6f7V-^3 z-VoSlIU=KsFO08(5nl#50wIp$#@9l=Nys<<#qpi-<9~FVHhvQFtwO#n@Pr-YIBT5y zPsAU_pF+Mv$ae<5w;gTsoBDd)ZAzwWI?ZNgbF+on(rjh6HroifLCBkhyj95Cg?yKg zcMADlA@36M144fA!g85PcoG}$?o8vtEsCc9b*yN-!80R+-T`_{7}j`mXSVpQcPKmE&5hS??TXuP{K(}HjI`|FuuN8{z4nSJ3NMAiQ~dNf|)nSK6#iLU5* zVQAy^ojJ%q_Sk@wmf_2^Ca?Qw9PP+(@Ix1p(;WNHy9{?h=pDkRKQF6GDDc$WICR=^dn))6HU1aGM1rnB~ET3;7u#?-o)^ 
zUsEAJ?NfyO>|;A49c>-Wt@_|==gh3YTTu?XYQC?yA;|png~Bvz%@x7U>&!*wVsnYP z)LdrPo6Ci~N634H{G5=V7xD{2eo@FT3Hjw6B$8nIOku7vSCe2~>MIKN_KJ{S74qA{ z+0Ca2XWzicXvb0SdeiUAZD=_~G7`@fp@&82M9g2P{zF&1L6~Xww z!!7eZ^ReJKcbWH_510>{519{}kC=}N`5hte6Y_o`zboYTgnU5A2ZelShxxeqg!!cT zl=-yzjJex+5i{F#uy7xE9n*+e*-38yBUDUZ3@Ib_EO^EGq7FL%57y7`9rrumln zw)u{^Psm4v{JxMs5b}pY{z%A2h5Ye$5=b!*kihHaVN!fbX!$ z&L3Youc~4|b=|yLA)gF=0a&u-oc7l-C%>Y0lhussf3y|Hek%-bDdg|Mh}Kq{fEw?x zWS8ZpI{48D^ow`ole3i-567TrHn_*=N`HziZ zFM9oIEu2tOQPC)7wslFMW3nUO+uEwO7Bu2&gi~#dsr$4TdPR1)>xO4LW*^TaE9*ToRPwr5R@I9 z{m#>e4IC73COO{oW@oMF5?h~dq(Dp3$IRETYhUpP}~i|8P#CjD4fy%zd_w% z-R8?}ux=I3m1;cPc`*1Xz^n#$4{{}_3LwK*8HNjPI0tS!PBH!0s+U03tZkR4X= z!NR&rINLW^JB2fTN?q-&IbCPWpFj5RxoK_PZ#~$k>>=Uo@VCpd9<_q+A^y_wgm5M{ zx}LW723K~IwbOdm+9RB?!Wl2T&LrXNxW(ET`gt*E`s;tEDPwD@%j;&;E}S~PVqr+; z!bY;!tT%nRL8>=G@yWrDLAk;iKWWz7(yGST*4De$fkwFpg|kZ|&5N7kpo zR6S~aY<(h}U4=7MIMX(hVtsCX5n8)+;mizN)5)=-opr)G*_b}x3TH+m@%Ppb!6Zu< zZ~bfq9|*#Gy!ESaX8pBgS-<;wZMM!?e^`GCXLsT3A)L8?<6GMfTl)9bZOgV(IJ1Q_ zCvZoyBQdg-?fOrE+g60Lr*QTPe4Olvi!^P|e*z-xw!+z4IQs-5J3A65wzE6>a(CFV zcAVYbj<*x+4tAoQB%J+(Gfz1C3+Djg94MTFgmbWP4%tD9olF9Wc8cBAPPNlQlQGn% z_=<#cu5fyV(|Eg-)?oJ$&f$U4T^tz$``ZJ9<^jSvqQM>{ zoFgZUDxEd2anQr;QNEnb_HcWIJyJMF3Fm0x9JARTO`<(kIL8Kix+L&I7e{%?BwulZ zJy|%%{SCJ4X?B?}ce`C+7urSkbi3Ftu}g(>ym00V=LF%LD4dgobFy$w*>2CU%k2ug z(z?r@+2(!WoGP5tgtJ08X9;I};hZg+?h9Oz;nq-D`)pya63!ywEEdj^{j{*x*q2&&O&eBTF|T%3<*bSt z;hZj&%3u`C8nlgUU@}L@uG?uwbeBptLDt@ z*>h3FjLg{N*hLjHIu$F4Q)X0`S9BdVaL|ZZwO#$mN<<(v-O;S2{XlRL9~90xfqvIZrsNg>$}eE)dR|t=0&8xBaZcu=fh5Q#cn2XPs}l za4rtqlxHZEO8)cugzxz+b_x2CM zxk5Ns1`cI7V!ZYl`%8dEJ%(^zCY+ZCuFiBsyFH!= zT6o%eA}RModtw5QWjaR2UKz}-8-#QHK~JnF&ePr#PYX|ir-Ng(C&n?xlN5;W<{0C> zN;uaGr{Bi``$D03l0BV+g+iimUMHMahs(rQ6dvF)Pr9#ovnRuo>FFk%*9hlY;k@=j zsr+3Z;mY}UEqHo|yzBl^3*-J*G5kv~<&M$8Ur&-}h>zfy{K9$t-%^JAiW@wU!g*z= zfIOo-qXP@F9LY^LdBy~)vm6;s$9cvF-p+C)Hr?dO54@S>$Za~wGda+@yCcav7uqa@ zHa=)`IkZ^^ZEk@!w?Uf*XtNpGY=t&=Lz{b`%`Ry3614di+WZD>&Vfq@mkBN#ToK@k 
z1Xnb;+JP$$T=C%Q0Ino(bplsEaP%nyoxSjylJK$~#ZWG*D;2sO^+29u7UIp%T;9d{zTfluAxbFb>MsRNe_fBx%3-0^D z{SaQNC=Y|O7nCz7|0VrPtJ2(Z(_n`Izbr`4H0`(D4cZ0eIG##{9(AtBR09qnwsi38UwiL9>K)V98t3dN! z1KI}A0-${f+K-_91lk$Ueg)kDdQ;FxgI)l75$MIBmx4YU^f{nE3Hr;RzY6;6puY+F zLC_C_(Fu(1VB~<&6O3Fi27xgIjH|$F1i-i%j4fbn17immcY|>c825qk1Q<_&@eCNx zg0UBj=fOA)W(=64z+4XIJz%~9<^eDdf%!g|AAG4XjVV`U>o(Ua(t(?E+f`TL(JD>o+R+3fhPkz-YoEBgJ&Rk z27@ObJQKk)89YfcSBUrx+QvcKWN6z3+75%Z*FfZ2h+GGecR}P%h`bjfABD)LAo2x>`~o7sfv8px z6$epWAu0``(jh7XqWVHq9z+d?sF4sg8luKR)Od(0fT$ubL={6+DMZbHC_hB)hNy2L zIufF@Ai6(94}|C;5Iqc{M?&;yh%STZOCWkKMAtxcEkrMZ=+zK?DMVid(bquqwGi!x z=nW7Zfat9d{WwJLg=p_v5d98BAAsmX5Pbxqk3#e(5d9fMe*@7!LG&4jJ`2&mLriOk zaY2j%F&e}e5K{#)^C4yh#CRcQ4a8gqF;_s$T8LQ(G3z1bW{7Ekn5Q7-74XKq1~IQg z%o`AM1Y$mbm}3xg9AZvD%t?qj1u@@4OtAAmAm%)@b3nVM&`ySSSg1AW#Hv{4-AZ`xC&4al45LW|nOChcv;#NRh5OW#C?S#0OA?{6x zI|y-yA?^c+`v~GrK-@`)I}35YL);$_cOKe1pnX$lFGG6`+ItOXZ$Wzxv~LUTqo92Z zv>yoVOQ8Mb(EeU%|0cBm5!(L_?f-yy7sM+NuR**4@fO5;AigccM?riH#3w;~Cx{;e z@uMMrEW|H{cpt>Cg7`bZ8@~hM?}qpnA^t6he+S}!gM_A#AVb0+NEi(XVIt+sjOQ6Gc=x_)+oQ6aou?Zx~kk||oTS8)MNQ{NVPLS9c64M|t z0}{JIVjd(8fW)DYI2;m3LgF|`%!kB@kT?Sp7kMEu_B>e}0Lhync`GDu zhvd5;d1v6BevZr2zlY@0kbDM`&q8vrtMkyg33QgBb93n23Oc)>vkIM!$9#E?rcJ!J zLzg?D%U08$1)$_z-E11a+$q$Lgp{Wt0t_pOGfUc3ywFh+V4_$-jPAT21gWn<>bsD708-CFS`$cFIp&QVx9dzpm-IAf(Q0O)ey5&Q+TIjX{x_P17&3Ngy3A$~8Zf`)h1JLadbUO=K zO(08xtOUqPfvi->nhIHEkW~&@%OGntWL*kb4@1^7ko7ENeF|A8AnPP_cR_a>x<^3w z4Cvkqy7z|emC(H!x-alT_bZ|M_0atW=)M!WKMdU;h3@Y__ao5#1ITU)*{vbl1=&fE zod($%kev_Ng^)cRvR6U&)sVdwvhRcJ#~}L&$bJ{HKZNX~ko_CvG=&@)aymhdHv@9I zLC#pnnF=`tkh2_eE`^-SA!irlJPtWeLe58!a}08hLyzXrLxmnX^vHuA!=cAW=rJF9 zEQKER(BoFwke2R#o$&-2i$IrM4?y}CiK-q5Qr^qK~}WOG zz(g1jfB_p}z$O^*5qJk2g8|23U>6ve1p~8Tpce*S0Ryjsfv>>8eK7D{7^K6XC>Rt2 zg9>0!ISi_VL0e$Zy)bAO4Eg~E{RV^1!QgH%xHk;$3xgNKU>^)#1%sc0!7sqzm+&&g z2}9g4M1>(EV8{d*G6{y<21B;OknJ$!I1Kp#hMb0>U14Z84DA6!FNdMm!O-hr=wTT8 zDGdD_hIN8r88ECH3|j@mu7+W2Vc4rMY(ETp4~Dma;lZcO;T{-10)|h3;gewadKi8S z48IMAe+*dI(1S4x=R)?S#>zVf17eJrzc; 
zgV8s^=v!d)H!%7q7<~rD^oB8mVa!k%b3Kf?6~^2SV~)X??_kUiFg6Ru_JOheVC*^= zdlPua-U4GkfU#e|*kdrR6O7A%aou3t8W^`0#;t>KhhW?%Fzz!Lp9JI6V0;FQzXHau zhw*+Ge+0${{a-+S0_3Maek$abL;hUIuZH}`Ab$_!KL-oDa#m~s%NYA`htrbffmLYP_sQ>$R=T`=`QnEEhG{S&4&gJ~^b+C-Q(9j2AQ zv<8^wy&I<81JjPfw4Y(xFEH&_n06KloKVml3Unwip}>ZM2q=hzf*2_11O;87AQcKS zpdbqha-d))6s&=Q^-$o4f*YaWCMdWS3hsb{?ND$R6x;{if(M}BVJO%O1in5`oCln2VqG3=p35upb(KIM3grezCR02h1P*e>?HBeLsMT?mUT1C-ndCAUJ!?ND+jlx%^LZBTL-lspI}&qB#wD0vA=UV)O=pyX{R*@u^s_n_n> zC^-wIO`ueQ(q>TF5=tYXGzLmzp)?*!6QQ&YN>@VZ)lj+?N^gMD8=>?jD7_U*H$mxE zDBS_2JE8O)C_M<&Q)8Q&7Ge%3p=@H=z7&DBlm|2cY~gR5XW*OsFV>iuF*j7b-r2iW5+A5-Prf ziXWlkXQ=oUDqBOP36(ZfM&YHh9aOf5%2cRKhsy3y*#jzbp)wCD2SDXusGIgR$6;!T+%Joor161A)l}|wBb5QvLRK5(AuR-MpPKs&^hncNmrVD0zRhX&6OcQ2$U}jsG*%M|?f|)B| z=4P1rD$G0uGk<}ZXJO`VFsmudl415$FncY`z7A&lVfKwM`(~JZ8_d2FW^aPoTVeJN zn7tEb-v_fFfY}ej?8o4-Ci#w6n|KexoWn5ZH<;TL=E^WP8s;X#+>S7}Kg=Bgb4S74 zg)nzH%v}j{Z-cp8VeWRA`#j8j1LnR3b5FqB(=hjEm=_K65@B9Pm^TdOjfZ&?U|tQ( ztA}|jz&md{%)1}vJqYuHpg*A+sO|#QSx}t~)pMY_4yqSJ^(|1n8LGEJ^_x(A5ULMD z^*NX?!~AA2zdOwD3-j|}{!Ey^0Ol`*`DPj99TseZ1v_BDK3MQREcg%>oQ0YuP$NN29MmL3O&6#c1vQhP zW(w3)L(LMXSq3#XLd~5}(*QNELd|~g*1QKb=U|}>3!A~hcv#p47IuY&6JgtW#lg@wCd;hV7VAS^r#3x9!HptcFr#zJi;sO=23BcXO8)J}%l1yH*T zYL`Rp7QEEn3$?qTb|2Kf549gc?K!BEp{^O!r9)j$sLO@A@laO)bwyBj4b*Lbx&YKY z0d>zo-3w564C=mvx*uRsOIW1CA`=$%fki`L(J)w43X5jLqB&kzv>Fy&1Bmb8W?9bt(#9hPLmk_oV+2$mGXlFMPqb+F`mSn>!g*$qqf zz>-g3$=9&t8(10vOWVWJ1Xwy0mX3p^`LJ{$EL{#uSHjX;Vd)lFx(${df~B9p($8R7 zb6BRrG9A3j2EwvYuxt!0tAk}LVVMt>ZH8s{z_R;b*&$f=2`u{z>RUs-3H3JAXFz=~ zsP7H+Z_stCa73n%{wpm14OaAk75!nwKv=OFR$K!su7wqk!-~DI;(1u{ zC9F6FE53)78mx?jmC>*=2Uh06$^o!4cxSV+5?0QHmFr;TO|bG7SosvJd;wOz1S?O$ z%3omRS@1@Iw*z>Sz&jkg`QV)h-r)6@7rX+zTfrMV<=zk8pTPSEc+Z1xIQa6xHxYa{ zg6~f7c^km@E%?rW?^h5xAo4&A0I?p#Eg)_KaU8@CAWp-oJXkdxR*i&J0a&#WR&9b+ zhhf#Hu}DSf~y1bbgyU0v7ix|Vfa>$=v})&1G`tE-|Q zHWWdch2Fb}AYwyNRC)*LX+R($fg}j`;of`xIP=Fl=Y8g{nR(`&=Xj}{mk#k#4KJU< z%jfd)`MmrrFTc#ouki9xUS7$|tLfK_er@R2j(%6u?(&kl`^mk1M?YJz`(~C_$&ke%fKZJ{Dpxl 
zc)d5TpTX;A@p|HUUhmKAukrdSUf;~?TX`dwHwt;9h&S%#jYoLn-@NfPZ!F`DpLw$l zZ+7O*Oy0boHy`88|M2D_-u#X?m-1F~-fGKR?Ro27-g<<${>@vnd21nww-)hs@OCP1 zH{Xzr!ns&@y@xt za|!QU$~$lJ&QRVN#yjP_bBK3pc(=ENchBJ6vv~If-W|ZZ19|s*-d(}Fs~Fs#!Ce`g z&EUrv{49h2%iv86-o@ZOymtZbUB-Kt^WN*c_a5&J<-P5^SH^pL8B)lQGZ}I=L!M(u zKZd-TV90k2Sss%&^2#hW*H}y$q{mSUtmzGVC{o zr!u@5!`m^u1H(HpJd@$s49{hF5yMYq_!$g8o8jj%{C5n0gyF*({sY5n8PS~)moVZw zM*Nu(H!|WDM*NM$h}#+QI3u28M1m17G2#_Q3}D0%M!e66QH=PA5n~uJkr7iEF`W^g zGh#U-Rxn~CBZ?VO%831pILL@Yj5ra%QAQkRWHKYWF|rpU3mMs0!pPGY`3FY+k&%C5 zWKEz{m-VoXg00j9kRXFB$m_Bfn$h&x~Bb$kmKo$H=XW+|8&|M&&Z9 z52Mav)H#egpHUYw>MBNE!>AMaf7I=ax{Fc&VAOq#dXQ27V$`#YI)VR>Q7x_^5d^ zK5D^7t@)_Uyw$hIss@R(8GSCJZ)5bmjJ}W2FEDxlqX#m29HXZ*dM2aSF?t)LcQ6KH zQW(>eF?o#X!qIzl8BuG5#9HKg{?i82=RG$1r{>pGGP@HHZ$P_PhEnE z4Vl=OiM^P3Iup-i;_XblkBJX3@f9Y%#l%5OoX^CsnD`A7e`R6`6U&&C%%oOKN@G$H zlg?(+xlH;ilkR5HKbZ6qlU`@in@pO)r1?xrEMU?aCT(TXHYO)CxfPSsn0y+O&u8-Q zn0z~v?_=@HTITc-TXl;un*X37DkR5G<0Q`<1L9aAr0>Sav5oT+y)^+BdS%+xoT zI+Uryn7W9m-!XM5Q_GlI#nfu1wP0F%rgdQ2=}fzTX%|YEb`R4YX4)f68_2Z5OdGHV1gD$}PheJ<1IF?}7=w=sPO z(~mPFnHj0fIENW0goqh`V8(;Y_zyFlV8#Sy%w)!FW^7=_4rc6PW)d@7GP5-^&tc}p z%=`m0A7SQG%zTEK?=kZOW`4xX@0q!RnX8z2lv$0LmBg&R%sPiz=P@hsH)j2VS^s3# zAZ86`)<|Z3%dDT7wVYYi%sR$w%+6(YA+w8^eLb^pW%g~%9?9%+%$~sPVrCy;b|rH< zGAD;QdCd7EbFO2~pPBQ2%sJ7-&Ut}3BbYOmIpdl0C3AjY&NAjyG3O|Ajxo0jbMu*7 zz})MY`&Z`vjk(V=w?A`VW9~HO&SUO;=5A!}PUi0BlN3H_%_r%6auc82!6$d|$utR{ z%;S^!d{W1}hRkcsyla_v3-fMe-U#N6W!`w^RWt7xpW@Ri`1A%oy^&AH^64}_ox!K| z%x}c}Cd@yV`Ij*NQs(z#{+rByoB6AlznS@46MS|upJnk`H$M9(pFPTFkMY?8KKq8x zmaw1^3!1Z_1q-fb!A&fr*6reHvd6;_KmjJ(90?@%28wuHc(9_~v(fa}nPR;G1{& zW-#CU$TzF`W-Z_5@ogWz?aQ}M@$LWk_9edknQzze?FN=)v7`q}da>kDmORapXIb)N zf+ee2vX<}K@m(h0W%1oTeD^TlJ;Ha3`EDuS{mA!eeBY7pJM;ZreE%TdKg{>v@cmDG z{|igou(UHvGg*2&OYdXp11$ZJr4v~?nWa@MJ<8I=F@Ct3A8z7@TlirRKMd!Gk^Hcm zANKRZ0eV_L^HUi=?d9hl{Cp}upU%(!;phMI^Yi??ke|Qh=kNF> ziCBtZd23 zG*-4{<;kq<$jUCP%wlB@EAv^|gOw-NcUBg$@>Euy!OF9TbbKfVT(_z*tCCsOlvO9O zsuio!S=EkJ9azqe$y0fY$s|s1wmsO{+>P%Lh!>aRHb>WZ$55*HF#s#a! 
zvuYBnrm$)*tLCw)l2yl99jtD|>L#pi!Rl75zLC{;v-%&bzK_)pu=;UU{~xQDvU)A6 z*Ry&PtGBRv535U9lgXN1tSMwoU)G#TV$FH1xqvm#u;x|P3}DUcta+0)Ls>J7H7i;3 zD{Hp1W*2Mru%?1F2U(lP+EZD3I&05j?K!Nyn6-ak?enaCowaYWb`WddW$j4Te!$wD ztUVxMZ6#}~S$mju7qIRM)?LNAYguxQ##0_!HRZYt}hv+gkK8?n9#>r+_Yl=auM z{&v>i$@+U(e=qB&vwk7#7qR|J)_=|VWvu_14QXu1Ot2w~4LNMcV?!S{^ku_SZ0N^^ zSK06y8(wF_du$lWhV^XN&4yw&l(Ati8>-oGn2o*Jcn%xSW8?4Gco7@_#KtSx_zoLC zWaGzd9LvV>Y&B9yUG9rbpN` zn@x+^^c9=FWz%e&8yhFhRs{qyp1j0*iyunQ`mAkTh3(5g>3mfTb^P|KeoKeme<(wI$Pdj z%TTub%$7}T*}|3+JUiG@E@8_)wq~)lkgY{*J(aDev-JYDUdYy$**b`=@3M6WTi<8v z$7~(L)=g|JVQU#%Pw-T*^$1(*__Ytep2x2j@askVdNIFV$*))Q>#O`am|uq^_;nb+ zj^Nj^{5qa(!M2mw){<>$Y-_`|&TPwM+f=rF#vh4@9En|Blwzpz?8r$2l zy*=BzvOSya53v17wm;4G=h&WL`>QAH0JeX__7!Yj#rCyqU(fdKY~RU_B6gh5j^DB4 z_w2ZY9apjA8g_ilj_K@}$&NYf_=Fu_u;WX120Kq;XG?abv9k?3JF_#Boqu8HUF^Jv z#Lj=R^L}lAjK#;!BjbvC7?-y&i6Aa;Mi?vL0#hTY@XJ)PY%*?ovT z!JdZfX~Ldl_OxP88hb8d&kgLkkv+Gw=PvgAgFW}L=Rx-ThdocS=Nb0=mpw1AX9Rn` zX3q-t)Kc6aL2(O;TT*;7#kmycQ(Q#xsT7|<@!1rgNAVvh{v*X#P<%DT*HQcs#qU!5 zF~vVnyo%y26z`(=AjMS_AEo#>B@HNPOi3~&ttd&SB+-tN4wQ7FB$tv?C^?^!KT&ce zC4ZshW=d|Qlsrz!6O=qp$?KF1p=1mt<0zR<$t+6dQt~M!3n=-9 zlJ6+_fs&s{lq{#DoYK~mW>R_?rPoq=3#E5b`Y%c!rSxe^pQH47N?)Y3AEj?nI*8K2 zln$kIIHhAKokr~YGTqwIOgUZkuaW&J4| zMA=}LD?wE##1(fviX!Pq3nCgR#3K@vUQYgq-+aidnhZVY%gUMlvPqzPkAHC zTTz}yc}L2-P@YA34(0ij7g2sHw+(x< z+50E<{+Yzyzp?jz_CCno|FHK-_CCYj|FZW5_6}h0>+F4tz3;I1J@$UU-U;lT%iekH zUCiFE*}H_jOWC`Oy=&OJp1qscyOq7$*?W|I+3YJ~-_7j1Q^LM~vF|DN^<&?w?0cJi z@3LTp(ZzB7qux~p1X0h)l_8nq>1NIlN{}lFL!2Un7|1a#niT!u5|8Dl* z%l`Y>{}B8CkNr=v|5^5*P;K`AkNqRr{|)Lq#nWN2xf@fn*M};XoG-^yENq4xGV( zvpH}c2Y$za-*ey!4qVNF>qs29fde;j;4uyi+( zH+j6iA2eUYs!@+zG_Tb0Vd=7ri!Q~uW$-zw=+$!PVb`I|1 zU@->|aIlJlH5{zv;87|YQ+X1V?WpWVWiFM4RQ9FvG%C-e@*FBJrt(rMFQ@WKDzBmP zW-9Nb@~nf2syj^(IwAsTxkzD5^f9Y7A9Vsrrnn@2FZz z)k>?_O;l~As+g)Ws`innIzUwwRYy6L%%N5s>dc``4t3{HPYxAws4s_3Uye=QIkYX zDmBfiX-Q2QH65wxPR*&*Tu9CDskwrhtEst;nj5IOiJCj8xtp4Mskxt;hp0Ir9oM`_ z&Fj<*qhYdE}~ z!<#s~i^IhnF5~b%4j-QpJ%PjvS{pnc7y= 
zcBVFy+V0f$q_&XSzSN#Z?fKMRNbSYcUP|rd)Lu{Rt<>H_?Y~LXK2Gg()ILw`i`4d` zwm-Fls2xo0P-=%$JBr%z)Xt=K0kz*#`vbMBs9j6#25L7``zy62)Rt4bpW1`e9-{U) z>Qbpor>-k?+0^xvP*+G@U+PYy?o8?~r0!zsE~V~r>aL{jFVx*l-J{e!M%}ZVP%Y~I zN8QWRy-MBN)V)jH5bB0eH)XkvoGwQyhZYgyusar$cdg?ZvAlXV?F?D6s?W67h zbyd_Ir9OrFbn3fOpG|#F>IAf)bFLfmil_?k8u=7Q#jg`qeD14hNI&+I+3H3IXZ`@OU`9Kjz_AxO z){kTTIW~-AvpMz&$L4cvA;%U;IQAXKmUFCvV>KLa!|@&*Ka=BUbNoDx|BmCo=lCBu zekaEt;P^uve}v& zmWlXTM6rlOnTUNN4v44{Q6r*O#8DB)MS}*SL1WP%Su|)W8gvs4@E}z zooKjJG(096H5HATi$*O)qt>F)rJ~WFMWernMmLK_f0c+vZ;3`DMWYWyqmMk(?otJB#E~MDl4O z`E-$dmPkHVBwrwsuMo*si{xuX@@*pdc9DFSNdAXNzE31SC=tp35y?-8fi{u|g@@kR1RwQo_$(u#; zL6KY~k`IaG!y>s(B>yH-QbkHfkDT(hz${LZfPNZxUDO*I!Hj%PZq#PHi%|z-+BDIxBO&6){L}~|-nkQ1b zi`1SXwNRw?6{)9*)N4iREh6<+k@|O$dZ$QzLZm(=QlA#7&xzFMMe2(p^(~P|eMh7Y z7O5YL)G;D;yhxoSQm2a486tI&Nc~=D{8~1ET4}qUoce>Eja7bi8Q#m1w$2G~Fed?h#EZ zMAJ&qv|2PhBAV8VX33&is%X|qG)ouF+KFZzM6<4CUJ%Wuie{gP zW}k{?i$t>@MYEqov(=*6I?-&SXtpIGn(Y$JibbF8LeadhXnu}pevxSYC(-;W(PEltu|Tx=T(tN>wD?K1ST0(u z5-rw>78^v1&7wu3RJ7PDTI?4s4vH3sM2i~H;)rP3NwmC3w0uysd{4CeK(w47TFwwH z=ZlsLMayqR%VnbF3ej@6Xjv>;9uO@nMayc@@~~*tNVIAqTD1|a+KW~hqE%Lyy{iB<)o)mfs|xuVqtqSZyB)g_|UWun!eM60VrtG|j?e-o{47p?9TtsW4q9ulp_ zh*mR2tJ$K}C!*D-qSZ0cx~XX0T(oW}TDKOhuM(~QDq8+H8^bsYqKO(iVxdZxSNyN0GK#q-_ytWg=~_NINLf4vDnG zBJHS1J1){2i1cKU-b$o*5b1d$y}L;7Ez*lb`l%xQERlY$NWVa&UnbJ873sH$^xH-H zJtF;|BK-l8{)k9_Or$6NPozI9(q9(oZ;15aB7LMt|5&7t73mX1`c#oVL!{3Z=?g{r zw<3L|NM9||H;DAjBK=p9zDuMRi}W&)UM13xiZ+c!nX#2TnTP4~N?HY)7jYYd8(XORv*IKl@OtiZ}w7XHXyG69SRkXWDw7XZd z8zI_F6zwL9cGE<=8KT{M(QbigS0&mL?Hh>pCmM$KNuqtaXx~<}KUuWT5be8(_PL_{ z?V|ldqW!-_`{zab7e)JiqJ4kSexPXorf5G%v>z&3~(M28R^P7)nj ziVj^xhi;-np6E~@I`k49`iKsvhz_TV4rhrDmx~Tphz?hY4%dnfe-<5X6di669c~pJ z{w_K^DLT9+5*>z!4&R9m8%0Kn$ml3C3Pi>!BI9==<06r9iO9H2Wc*2FTqQDY6d5;* zjN3%U9U|jyk@2X=cuZtGDKefB8UGa-FN%zQBBQ^^7$h=25E&ndjBz4kqR5yc5g9W@ z#vG9`Ph>0>89#}PUqr?>k+D-`>=7BIB4e+}s1O;IBBNSl91$H;M8{^LV++x-wdmMJ zbZjp=W{8fRMaQn9%=f$G)*P`_;*fylgGWZot+Ul5sZh|IS{=4g>Q 
zPGn9LnNvjObdfnrWX=_tpNh-{B6E?*Tp==7i_CQ*bEC-IA~Ls$%$*{0kH{<)nR_Lo zYm(^NR&*^8UC$R?uM}PH5nZ1WT?dPmUx}=5Mb`HsYlX;KEwa{$tc@aTi^!@F z+3iGjuE;)5WM3|_uMpW+i|p$}_6;ICag)ftUt~WavY!&!PmAnlMD`$&{ej5-NMw%@ z+2cj_B#}K;WPc{I7mDo9MfM_*{iDeKNn~#p*}sbH9U^=HS}BBxB`>=QW$L{6>9IVy6Fi`)hx_eB2BJxS!Y7r8kiH&5jD6uE^W zx39=OL*$+eNf~+EOP%Pavu@7&xza<-2Fsu ze~~*-wkWs1Bkk(VR#@Z?nkTF7kGXyke2JSL9WQyh@Qz`S*+bheZA(BL6Xw|9>L?DT&B`R^%r{{{KY& zJ0kx*k^jEPA1?Aoiu?~n{%DauPUKG%`3pt<8j-(KqYk) zME9FS_rHqnw~6j|i0=JG_xD8ip`!b6(S4NY{!v17A0xU?6x}}&-9HuGzYyKO65YQQ z-M<&ze-YhRitejL_jRJ6p(w}@1*eFDb49`VqToVNaIq-3Ruo(>3hopI_lSajih>73 z!Na29QBm-?D0o5?^b-aBML}YqD0ov83=#!{MZr)}FkBQ&5Cvb0f?q_zPEl}N^ynmd zbQe7?5j}1ZJsuK0o)tab6g?)39@9jRnWD!W(PN(IF<G@(ep{s^BK|eCDHR0(Q|<4`L5{sk?1*E z^c*L8P82<7iJsq!oXujfUt{-W1l(QB0G^@ZqFB6=MZy{bg78qup(^lmA7cM`q3h~5RF zcQ4VqkLZ1h=zY5AeU|8bvFLrN=zY29eWmDqwdj4F=zX{7{cu9`{y)+C1<`w$=sjNa zo+x@x5xu92-m^sSIimLp(R-`ty-oDqDSGb_y-P*!a?$&^C~PJQPZEW#L}9upY$pmk zh{9}9m?H`cL}4#c*hdteCkihRg%^v$#HFI}a#47-D7;P--XIF^5`_?aCe6NPVx!nZ}?NKrUW6wVffb4B4NqHvxl{6-Wm5rs=d;WAP9izr+v3O9+u zEuwITDBLXyOGKiuS`;1@g-1o4VxMW1$}Pp0V8OY|ueeNGj9&Jcah z7JV)deJ&DxE)jih6n$UD7s4&-7ktB5=D=QqQ^wh zv!W;=ivA~xUK2%ch@!Ve(J)aoQWSkCiYAGod7@~(C|W3rzBuuCy(ro!iZ+X)6VdGu zMdhMspC~#YimF7>QPDS7^z9+~-XQwkCi=c4`t}!nr;EN{ioQ!k-(~;r&(9h)`2TGC OoBluh|DWjl^Zx^F)?M8I diff --git a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist index 556fc539eb..f5c01eb1d0 100644 --- a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist +++ b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist @@ -16,5 +16,18 @@ landmarkName = "field_avg_neighbors(int index, glm::vec3 * result)" landmarkType = "7"> + + diff --git a/main.cpp b/main.cpp index 016e202ab0..3596d20183 100644 --- a/main.cpp +++ b/main.cpp @@ -95,11 +95,16 @@ ParticleSystem balls(0, 0.0 // Gravity ); -Cloud cloud(300000, // 
Particles +Cloud cloud(0, // Particles box, // Bounding Box false // Wrap ); +float cubes_position[MAX_CUBES*3]; +float cubes_scale[MAX_CUBES]; +float cubes_color[MAX_CUBES*3]; +int cube_count = 0; + #define RENDER_FRAME_MSECS 10 #define SLEEP 0 @@ -271,6 +276,18 @@ void init(void) myHead.setNoise(noise); } + int index = 0; + float location[] = {0,0,0}; + float scale = 10.0; + int j = 0; + while (index < (MAX_CUBES/2)) { + index = 0; + j++; + makeCubes(location, scale, &index, cubes_position, cubes_scale, cubes_color); + std::cout << "Run " << j << " Made " << index << " cubes\n"; + cube_count = index; + } + //load_png_as_texture(texture_filename); if (serial_on) @@ -491,6 +508,20 @@ void display(void) glRotatef(render_pitch, 1, 0, 0); glRotatef(render_yaw, 0, 1, 0); glTranslatef(location[0], location[1], location[2]); + + glPushMatrix(); + glTranslatef(WORLD_SIZE/2, WORLD_SIZE/2, WORLD_SIZE/2); + int i = 0; + while (i < cube_count) { + glPushMatrix(); + glTranslatef(cubes_position[i*3], cubes_position[i*3+1], cubes_position[i*3+2]); + glColor3fv(&cubes_color[i*3]); + glutSolidCube(cubes_scale[i]); + glPopMatrix(); + i++; + } + glPopMatrix(); + /* Draw Point Sprites */ @@ -587,7 +618,20 @@ void display(void) glutSwapBuffers(); framecount++; } - +void specialkey(int k, int x, int y) +{ + if (k == GLUT_KEY_UP) fwd_vel += 0.05; + if (k == GLUT_KEY_DOWN) fwd_vel -= 0.05; + if (k == GLUT_KEY_LEFT) { + if (glutGetModifiers() == GLUT_ACTIVE_SHIFT) lateral_vel -= 0.02; + else render_yaw_rate -= 0.25; + } + if (k == GLUT_KEY_RIGHT) { + if (glutGetModifiers() == GLUT_ACTIVE_SHIFT) lateral_vel += 0.02; + else render_yaw_rate += 0.25; + } + +} void key(unsigned char k, int x, int y) { // Process keypresses @@ -728,8 +772,8 @@ void reshape(int width, int height) glLoadIdentity(); gluPerspective(45, //view angle 1.0, //aspect ratio - 1.0, //near clip - 200.0);//far clip + 0.1, //near clip + 50.0);//far clip glMatrixMode(GL_MODELVIEW); glLoadIdentity(); @@ -810,6 +854,7 @@ int 
main(int argc, char** argv) glutDisplayFunc(display); glutReshapeFunc(reshape); glutKeyboardFunc(key); + glutSpecialFunc(specialkey); glutMotionFunc(motionFunc); glutMouseFunc(mouseFunc); glutIdleFunc(idle); diff --git a/util.cpp b/util.cpp index 81d6bca869..89fdcc8900 100644 --- a/util.cpp +++ b/util.cpp @@ -15,11 +15,37 @@ #include "world.h" #include "glm/glm.hpp" - float randFloat () { return (rand()%10000)/10000.f; } +void makeCubes(float location[3], float scale, int * index, + float * cubes_position, float * cubes_scale, float * cubes_color) { + int i; + float spot[3]; + //std::cout << "loc: " << location[0] << "," + //<< location[1] << "," << location[2] << "\n"; + if ((*index >= MAX_CUBES) || (scale < SMALLEST_CUBE)) return; + if (randFloat() < 0.5) { + // Make a cube + for (i = 0; i < 3; i++) cubes_position[*index*3 + i] = location[i]; + cubes_scale[*index] = scale; + cubes_color[*index*3] = randFloat(); + cubes_color[*index*3 + 1] = randFloat(); + cubes_color[*index*3 + 2] = randFloat(); + *index += 1; + //std::cout << "Quad made at scale " << scale << "\n"; + } else { + for (i = 0; i < 8; i++) { + spot[0] = location[0] + (i%2)*scale/2.0; + spot[1] = location[1] + ((i/2)%2)*scale/2.0; + spot[2] = location[2] + ((i/4)%2)*scale/2.0; + //std::cout << spot[0] << "," << spot[1] << "," << spot[2] << "\n"; + makeCubes(spot, scale/2.0, index, cubes_position, cubes_scale, cubes_color); + } + } +} + void render_vector(glm::vec3 * vec) { // Show edge of world diff --git a/util.h b/util.h index 637248b761..c6c2a319db 100644 --- a/util.h +++ b/util.h @@ -8,6 +8,7 @@ #ifndef interface_util_h #define interface_util_h + #include "glm/glm.hpp" void outstring(char * string, int length); @@ -20,4 +21,7 @@ void drawvec3(int x, int y, float scale, float rotate, float thick, int mono, gl float r=1.0, float g=1.0, float b=1.0); double diffclock(timeval clock1,timeval clock2); +void makeCubes(float location[3], float scale, int * index, + float * cubes_position, float * 
cubes_scale, float * cubes_color); + #endif diff --git a/world.h b/world.h index e11976c1c8..106f47ff69 100644 --- a/world.h +++ b/world.h @@ -6,7 +6,7 @@ // Copyright (c) 2012 __MyCompanyName__. All rights reserved. // -// Simulation happens in positive cube with edge of size WORLD_SIZE +// Simulation happens in positive cube with edge of size WORLD_SIZE #ifndef interface_world_h #define interface_world_h @@ -14,5 +14,7 @@ const float WORLD_SIZE = 10.0; #define PI 3.14159265 +#define MAX_CUBES 2000 +#define SMALLEST_CUBE 0.01 #endif From 410e7b2d7e59e4c8fcdb3caa997aae1d7757e08e Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Mon, 26 Nov 2012 14:47:30 -0800 Subject: [PATCH 017/136] Commented out cubes, added particle count, misc cleanup --- main.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/main.cpp b/main.cpp index 87cbc5c2c7..5010dcf1d5 100644 --- a/main.cpp +++ b/main.cpp @@ -49,9 +49,6 @@ #include "cloud.h" #include "agent.h" - -//TGAImg Img; - using namespace std; // Junk for talking to the Serial Port @@ -95,7 +92,7 @@ ParticleSystem balls(0, 0.0 // Gravity ); -Cloud cloud(0, // Particles +Cloud cloud(100000, // Particles box, // Bounding Box false // Wrap ); @@ -243,6 +240,8 @@ void initDisplay(void) glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_DEPTH_TEST); + + load_png_as_texture(texture_filename); } void init(void) @@ -276,20 +275,24 @@ void init(void) myHead.setNoise(noise); } + // turning cubes off for the moment - + // uncomment to re-enable + /* + int index = 0; float location[] = {0,0,0}; float scale = 10.0; int j = 0; while (index < (MAX_CUBES/2)) { + index = 0; j++; makeCubes(location, scale, &index, cubes_position, cubes_scale, cubes_color); std::cout << "Run " << j << " Made " << index << " cubes\n"; cube_count = index; } + */ - //load_png_as_texture(texture_filename); - if (serial_on) { // Call readsensors for a while to get stable initial values on sensors @@ -525,8 +528,6 @@ void 
display(void) /* Draw Point Sprites */ - load_png_as_texture(texture_filename); - glDisable( GL_POINT_SPRITE_ARB ); glDisable( GL_TEXTURE_2D ); if (!display_head) cloud.render(); From 4ebcc0cff35867a58641433ebd30c9e659f35086 Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Mon, 26 Nov 2012 18:17:00 -0800 Subject: [PATCH 018/136] Look at all the pretty colours! We now have color values for the field elements, which are gradually added to the particles as they move. --- cloud.cpp | 19 ++++++++++++++----- cloud.h | 2 +- field.cpp | 19 +++++++++++-------- field.h | 14 +++++++++++--- 4 files changed, 37 insertions(+), 17 deletions(-) diff --git a/cloud.cpp b/cloud.cpp index 18a05ced44..02b65534b9 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -21,14 +21,20 @@ Cloud::Cloud(int num, particles = new Particle[count]; for (i = 0; i < count; i++) { - particles[i].position.x = randFloat()*box.x; - particles[i].position.y = randFloat()*box.y; - particles[i].position.z = randFloat()*box.z; + float x = randFloat()*box.x; + float y = randFloat()*box.y; + float z = randFloat()*box.z; + particles[i].position.x = x; + particles[i].position.y = y; + particles[i].position.z = z; particles[i].velocity.x = 0; //randFloat() - 0.5; particles[i].velocity.y = 0; //randFloat() - 0.5; particles[i].velocity.z = 0; //randFloat() - 0.5; + particles[i].color = glm::vec3(x*0.8f/WORLD_SIZE + 0.2f, + y*0.8f/WORLD_SIZE + 0.2f, + z*0.8f/WORLD_SIZE + 0.2f); } } @@ -38,7 +44,7 @@ void Cloud::render() { float particle_attenuation_quadratic[] = { 0.0f, 0.0f, 2.0f }; glEnable( GL_TEXTURE_2D ); - glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); + glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, particle_attenuation_quadratic ); float maxSize = 0.0f; @@ -52,6 +58,9 @@ void Cloud::render() { glBegin( GL_POINTS ); for (int i = 0; i < count; i++) { + glColor3f(particles[i].color.x, + particles[i].color.y, + particles[i].color.z); 
glVertex3f(particles[i].position.x, particles[i].position.y, particles[i].position.z); @@ -75,7 +84,7 @@ void Cloud::simulate (float deltaTime) { // Interact with Field const float FIELD_COUPLE = 0.0000001; - field_interact(&particles[i].position, &particles[i].velocity, FIELD_COUPLE); + field_interact(&particles[i].position, &particles[i].velocity, &particles[i].color, FIELD_COUPLE); // Bounce or Wrap if (wrapBounds) { diff --git a/cloud.h b/cloud.h index 575501be25..8b995cc0c3 100644 --- a/cloud.h +++ b/cloud.h @@ -22,7 +22,7 @@ public: private: struct Particle { - glm::vec3 position, velocity; + glm::vec3 position, velocity, color; } *particles; unsigned int count; glm::vec3 bounds; diff --git a/field.cpp b/field.cpp index d08c93822c..17570c31ed 100644 --- a/field.cpp +++ b/field.cpp @@ -7,15 +7,11 @@ // #include "field.h" +#include "glm/glm.hpp" #define FIELD_SCALE 0.00050 // A vector-valued field over an array of elements arranged as a 3D lattice -struct { - glm::vec3 val; -} field[FIELD_ELEMENTS]; - - int field_value(float *value, float *pos) // sets the vector value (3 floats) to field value at location pos in space. // returns zero if the location is outside world bounds @@ -33,7 +29,6 @@ int field_value(float *value, float *pos) else return 0; } - void field_init() // Initializes the field to some random values { @@ -43,7 +38,11 @@ void field_init() field[i].val.x = (randFloat() - 0.5)*FIELD_SCALE; field[i].val.y = (randFloat() - 0.5)*FIELD_SCALE; field[i].val.z = (randFloat() - 0.5)*FIELD_SCALE; - } + // and set up the RGB values for each field element. 
+ fieldcolors[i].rgb = glm::vec3(((i%10)*0.08) + 0.2f, + ((i%100)*0.008) + 0.2f, + (i*0.0008) + 0.2f); + } } void field_add(float* add, float *pos) @@ -60,7 +59,7 @@ void field_add(float* add, float *pos) } } -void field_interact(glm::vec3 * pos, glm::vec3 * vel, float coupling) { +void field_interact(glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling) { int index = (int)(pos->x/WORLD_SIZE*10.0) + (int)(pos->y/WORLD_SIZE*10.0)*10 + @@ -72,6 +71,9 @@ void field_interact(glm::vec3 * pos, glm::vec3 * vel, float coupling) { glm::vec3 temp = *vel; temp *= coupling; field[index].val += temp; + + // add a fraction of the field color to the particle color + *color = (*color * 0.999f) + (fieldcolors[index].rgb * 0.001f); } } @@ -172,3 +174,4 @@ void field_render() } + diff --git a/field.h b/field.h index e1d26206f2..33004554d2 100644 --- a/field.h +++ b/field.h @@ -20,14 +20,22 @@ #include "glm/glm.hpp" // Field is a lattice of vectors uniformly distributed FIELD_ELEMENTS^(1/3) on side - const int FIELD_ELEMENTS = 1000; +struct { + glm::vec3 val; +} field[FIELD_ELEMENTS]; + +// Pre-calculated RGB values for each field element +struct { + glm::vec3 rgb; +} fieldcolors[FIELD_ELEMENTS]; + void field_init(); int field_value(float *ret, float *pos); void field_render(); void field_add(float* add, float *loc); -void field_interact(glm::vec3 * pos, glm::vec3 * vel, float coupling); +void field_interact(glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling); void field_simulate(float dt); - +glm::vec3 hsv2rgb(glm::vec3 in); #endif From 8b720d53df0a049e2eab15287fbc4baad6868342 Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Wed, 28 Nov 2012 14:01:35 -0800 Subject: [PATCH 019/136] Move constants to macros; increase particle count --- cloud.cpp | 9 ++++++--- field.cpp | 11 +++++++---- interface.xcodeproj/project.pbxproj | 3 ++- main.cpp | 2 +- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/cloud.cpp b/cloud.cpp index 
02b65534b9..329b840ab5 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -10,6 +10,8 @@ #include "cloud.h" #include "util.h" +#define COLOR_MIN 0.3f // minimum R/G/B value at 0,0,0 - also needs setting in field.cpp + Cloud::Cloud(int num, glm::vec3 box, int wrap) { @@ -32,9 +34,10 @@ Cloud::Cloud(int num, particles[i].velocity.y = 0; //randFloat() - 0.5; particles[i].velocity.z = 0; //randFloat() - 0.5; - particles[i].color = glm::vec3(x*0.8f/WORLD_SIZE + 0.2f, - y*0.8f/WORLD_SIZE + 0.2f, - z*0.8f/WORLD_SIZE + 0.2f); + float color_mult = 1 - COLOR_MIN; + particles[i].color = glm::vec3(x*color_mult/WORLD_SIZE + COLOR_MIN, + y*color_mult/WORLD_SIZE + COLOR_MIN, + z*color_mult/WORLD_SIZE + COLOR_MIN); } } diff --git a/field.cpp b/field.cpp index 17570c31ed..011fb6515f 100644 --- a/field.cpp +++ b/field.cpp @@ -9,6 +9,8 @@ #include "field.h" #include "glm/glm.hpp" #define FIELD_SCALE 0.00050 +#define COLOR_DRIFT_RATE 0.001f // per-frame drift of particle color towards field element color +#define COLOR_MIN 0.3f // minimum R/G/B value at 0,0,0 - also needs setting in cloud.cpp // A vector-valued field over an array of elements arranged as a 3D lattice @@ -39,9 +41,10 @@ void field_init() field[i].val.y = (randFloat() - 0.5)*FIELD_SCALE; field[i].val.z = (randFloat() - 0.5)*FIELD_SCALE; // and set up the RGB values for each field element. 
- fieldcolors[i].rgb = glm::vec3(((i%10)*0.08) + 0.2f, - ((i%100)*0.008) + 0.2f, - (i*0.0008) + 0.2f); + float color_mult = 1 - COLOR_MIN; + fieldcolors[i].rgb = glm::vec3(((i%10)*(color_mult/10.0f)) + COLOR_MIN, + ((i%100)*(color_mult/100.0f)) + COLOR_MIN, + (i*(color_mult/1000.0f)) + COLOR_MIN); } } @@ -73,7 +76,7 @@ void field_interact(glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float c field[index].val += temp; // add a fraction of the field color to the particle color - *color = (*color * 0.999f) + (fieldcolors[index].rgb * 0.001f); + *color = (*color * (1 - COLOR_DRIFT_RATE)) + (fieldcolors[index].rgb * COLOR_DRIFT_RATE); } } diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 46d29d8132..166c6ad19c 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -57,7 +57,7 @@ /* Begin PBXFileReference section */ 08FB7796FE84155DC02AAC07 /* main.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = main.cpp; sourceTree = ""; }; - 8DD76F6C0486A84900D96B5E /* interface */ = {isa = PBXFileReference; includeInIndex = 0; lastKnownFileType = "compiled.mach-o.executable"; path = interface; sourceTree = BUILT_PRODUCTS_DIR; }; + 8DD76F6C0486A84900D96B5E /* interface */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = interface; sourceTree = BUILT_PRODUCTS_DIR; }; B6BDADD115F4084F002A07DF /* audio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio.h; sourceTree = ""; }; B6BDADD315F4085B002A07DF /* audio.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = audio.cpp; sourceTree = ""; }; B6BDADD515F40B04002A07DF /* libportaudio.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libportaudio.a; sourceTree = ""; }; @@ -321,6 +321,7 @@ /usr/local/lib, 
/usr/local/Cellar/libpng/1.5.13/lib, ); + ONLY_ACTIVE_ARCH = NO; OTHER_CPLUSPLUSFLAGS = ( "-O3", "$(OTHER_CFLAGS)", diff --git a/main.cpp b/main.cpp index 5010dcf1d5..256a550058 100644 --- a/main.cpp +++ b/main.cpp @@ -92,7 +92,7 @@ ParticleSystem balls(0, 0.0 // Gravity ); -Cloud cloud(100000, // Particles +Cloud cloud(200000, // Particles box, // Bounding Box false // Wrap ); From fad5e28347393ee8ed9ee0ce14b2f1daaf8df7c0 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Wed, 28 Nov 2012 14:47:09 -0800 Subject: [PATCH 020/136] Moving I/O code into head class, playing with blocks --- SerialInterface.cpp | 4 +- SerialInterface.h | 9 ++ head.cpp | 133 +++--------------- head.h | 5 + interface.xcodeproj/project.pbxproj | 2 +- .../UserInterfaceState.xcuserstate | Bin 104780 -> 104831 bytes main.cpp | 72 ++++++---- network.cpp | 8 +- network.h | 3 + util.cpp | 16 ++- world.h | 4 +- 11 files changed, 102 insertions(+), 154 deletions(-) diff --git a/SerialInterface.cpp b/SerialInterface.cpp index e3b63fcdae..1ac8225951 100644 --- a/SerialInterface.cpp +++ b/SerialInterface.cpp @@ -60,10 +60,10 @@ int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_chan { // Channels: // 0, 1 = Head Pitch and Yaw - // 2,3,4 = Head XYZ Acceleration + // 2,3,4 = Head X,Y,Z Acceleration // int samples_read = 0; - const float AVG_RATE[] = {0.001, 0.001, 0.01, 0.01, 0.01}; + const float AVG_RATE[] = {0.001, 0.001, 0.001, 0.001, 0.001}; char bufchar[1]; while (read(serial_fd, bufchar, 1) > 0) { diff --git a/SerialInterface.h b/SerialInterface.h index 3fd7a2146e..cc2d76600e 100644 --- a/SerialInterface.h +++ b/SerialInterface.h @@ -12,4 +12,13 @@ int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_chan #define NUM_CHANNELS 5 #define SERIAL_PORT_NAME "/dev/tty.usbmodem641" +// Acceleration sensors, in screen/world coord system (X = left/right, Y = Up/Down, Z = fwd/back) +#define ACCEL_X 3 +#define ACCEL_Y 4 +#define ACCEL_Z 2 + +// Gyro sensors, 
in coodinate system of head/airplane +#define PITCH_RATE 0 +#define YAW_RATE 1 + #endif diff --git a/head.cpp b/head.cpp index 98cbf0d0d3..d2b2a0ed02 100644 --- a/head.cpp +++ b/head.cpp @@ -9,6 +9,7 @@ #include #include "head.h" #include "util.h" +#include "glm/gtx/vector_angle.hpp" float skinColor[] = {1.0, 0.84, 0.66}; float browColor[] = {210.0/255.0, 105.0/255.0, 30.0/255.0}; @@ -56,14 +57,7 @@ void Head::reset() leanForward = leanSideways = 0; } -// Read the sensors -void readSensors() -{ - -} - -/* -void update_pos(float frametime) +void Head::UpdatePos(float frametime, int * adc_channels, float * avg_adc_channels, int head_mirror, glm::vec3 * gravity) // Using serial data, update avatar/render position and angles { float measured_pitch_rate = adc_channels[0] - avg_adc_channels[0]; @@ -75,111 +69,28 @@ void update_pos(float frametime) const float HEAD_ROTATION_SCALE = 0.20; const float HEAD_LEAN_SCALE = 0.02; if (head_mirror) { - myHead.addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); - myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); + addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); } else { - myHead.addYaw(measured_yaw_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + addYaw(measured_yaw_rate * -HEAD_ROTATION_SCALE * frametime); + addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + } + + // Try to 
measure absolute roll from sensors + const float MIN_ROLL = 3.0; + glm::vec3 v1(gravity->x, gravity->y, 0); + glm::vec3 v2(adc_channels[ACCEL_X], adc_channels[ACCEL_Y], 0); + float newRoll = acos(glm::dot(glm::normalize(v1), glm::normalize(v2))) ; + if (newRoll != NAN) { + newRoll *= 1000.0; + if (newRoll > MIN_ROLL) { + if (adc_channels[ACCEL_X] > gravity->x) newRoll *= -1.0; + //SetRoll(newRoll); + } } - // Decay avatar head back toward zero - //pitch *= (1.f - 5.0*frametime); - //yaw *= (1.f - 7.0*frametime); - - // Update head_mouse model - const float MIN_MOUSE_RATE = 30.0; - const float MOUSE_SENSITIVITY = 0.1; - if (powf(measured_yaw_rate*measured_yaw_rate + - measured_pitch_rate*measured_pitch_rate, 0.5) > MIN_MOUSE_RATE) - { - head_mouse_x -= measured_yaw_rate*MOUSE_SENSITIVITY; - head_mouse_y += measured_pitch_rate*MOUSE_SENSITIVITY*(float)HEIGHT/(float)WIDTH; - } - head_mouse_x = max(head_mouse_x, 0); - head_mouse_x = min(head_mouse_x, WIDTH); - head_mouse_y = max(head_mouse_y, 0); - head_mouse_y = min(head_mouse_y, HEIGHT); - - // Update render direction (pitch/yaw) based on measured gyro rates - const int MIN_YAW_RATE = 300; - const float YAW_SENSITIVITY = 0.03; - const int MIN_PITCH_RATE = 300; - const float PITCH_SENSITIVITY = 0.04; - - if (fabs(measured_yaw_rate) > MIN_YAW_RATE) - { - if (measured_yaw_rate > 0) - render_yaw_rate -= (measured_yaw_rate - MIN_YAW_RATE) * YAW_SENSITIVITY * frametime; - else - render_yaw_rate -= (measured_yaw_rate + MIN_YAW_RATE) * YAW_SENSITIVITY * frametime; - } - if (fabs(measured_pitch_rate) > MIN_PITCH_RATE) - { - if (measured_pitch_rate > 0) - render_pitch_rate += (measured_pitch_rate - MIN_PITCH_RATE) * PITCH_SENSITIVITY * frametime; - else - render_pitch_rate += (measured_pitch_rate + MIN_PITCH_RATE) * PITCH_SENSITIVITY * frametime; - } - render_yaw += render_yaw_rate; - render_pitch += render_pitch_rate; - - // Decay render_pitch toward zero because we never look constantly up/down - render_pitch *= (1.f - 
2.0*frametime); - - // Decay angular rates toward zero - render_pitch_rate *= (1.f - 5.0*frametime); - render_yaw_rate *= (1.f - 7.0*frametime); - - // Update slide left/right based on accelerometer reading - const int MIN_LATERAL_ACCEL = 20; - const float LATERAL_SENSITIVITY = 0.001; - if (fabs(measured_lateral_accel) > MIN_LATERAL_ACCEL) - { - if (measured_lateral_accel > 0) - lateral_vel += (measured_lateral_accel - MIN_LATERAL_ACCEL) * LATERAL_SENSITIVITY * frametime; - else - lateral_vel += (measured_lateral_accel + MIN_LATERAL_ACCEL) * LATERAL_SENSITIVITY * frametime; - } - - //slide += lateral_vel; - lateral_vel *= (1.f - 4.0*frametime); - - // Update fwd/back based on accelerometer reading - const int MIN_FWD_ACCEL = 20; - const float FWD_SENSITIVITY = 0.001; - - if (fabs(measured_fwd_accel) > MIN_FWD_ACCEL) - { - if (measured_fwd_accel > 0) - fwd_vel += (measured_fwd_accel - MIN_FWD_ACCEL) * FWD_SENSITIVITY * frametime; - else - fwd_vel += (measured_fwd_accel + MIN_FWD_ACCEL) * FWD_SENSITIVITY * frametime; - - } - // Decrease forward velocity - fwd_vel *= (1.f - 4.0*frametime); - - // Update forward vector based on pitch and yaw - fwd_vec[0] = -sinf(render_yaw*PI/180); - fwd_vec[1] = sinf(render_pitch*PI/180); - fwd_vec[2] = cosf(render_yaw*PI/180); - - // Advance location forward - location[0] += fwd_vec[0]*fwd_vel; - location[1] += fwd_vec[1]*fwd_vel; - location[2] += fwd_vec[2]*fwd_vel; - - // Slide location sideways - location[0] += fwd_vec[2]*-lateral_vel; - location[2] += fwd_vec[0]*lateral_vel; - - // Update head and manipulator objects with object with current location - myHead.setPos(glm::vec3(location[0], location[1], location[2])); - balls.updateHand(myHead.getPos() + myHand.getPos(), glm::vec3(0,0,0), myHand.getRadius()); } -*/ - void Head::addLean(float x, float z) { // Add Body lean as impulse @@ -201,7 +112,7 @@ void Head::simulate(float deltaTime) // Move toward new target Pitch += (PitchTarget - Pitch)*22*deltaTime; // (1.f - 
DECAY*deltaTime)*Pitch + ; Yaw += (YawTarget - Yaw)*22*deltaTime; // (1.f - DECAY*deltaTime); - Roll *= (1.f - DECAY*deltaTime); + //Roll *= (1.f - DECAY*deltaTime); } leanForward *= (1.f - DECAY*30.f*deltaTime); diff --git a/head.h b/head.h index be228137ec..5875114e77 100644 --- a/head.h +++ b/head.h @@ -13,6 +13,7 @@ #include "field.h" #include "world.h" #include +#include "SerialInterface.h" class Head { float noise; @@ -51,11 +52,15 @@ class Head { public: Head(void); void reset(); + void UpdatePos(float frametime, int * adc_channels, float * avg_adc_channels, + int head_mirror, glm::vec3 * gravity); void setNoise (float mag) { noise = mag; } void setPitch(float p) {Pitch = p; } void setYaw(float y) {Yaw = y; } + void SetRoll(float r) {Roll = r; }; void addPitch(float p) {Pitch -= p; } void addYaw(float y){Yaw -= y; } + void addRoll(float r){Roll += r; } void addLean(float x, float z); void getPitch(float); void render(); diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 8930389ea0..613476cb65 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -140,11 +140,11 @@ isa = PBXGroup; children = ( 08FB7796FE84155DC02AAC07 /* main.cpp */, + D4EE3BC015E746E900EE4C89 /* world.h */, D409B9A6165CA7A50099B0B3 /* agent.h */, D409B9A7165CA7BB0099B0B3 /* agent.cpp */, D409B988165849030099B0B3 /* cloud.h */, D409B989165849180099B0B3 /* cloud.cpp */, - D4EE3BC015E746E900EE4C89 /* world.h */, B6BDAE4315F6BE53002A07DF /* particle.cpp */, B6BDAE4115F6BE4D002A07DF /* particle.h */, D4EE3BBD15E7465700EE4C89 /* field.cpp */, diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 1aded4aa69d1438242e9e35135534eb0091d71d7..9c6cf6345d1121e26ceb918d0ca0ead6635f79f7 100644 GIT binary patch delta 30392 
zcmaI52Ut{B_r86Pa!%B!z?`ZSX#zR}(u-j4y;nL?lo>@4E9%U)_l7z)P}FD=jmDT5 zQ;o0bG0jBNjp@Y{qsA1Y`CkKXLf(A8>qF49_ImcSp1s!En|;P}56OQ%ByZH4zHMIc zl=)dAeUACdN00Y9-rP0FI@mhII@CJRnq|$l=2)j#%d8Ho(>m8W&pO|_+`7WL(z?pJ z)w<2u-;=mXnn|f!uqiF5$hT2bJiEF=d7<=-?Cn`zHj}=`kD1}>(|yFtUp?B zSbw+Pvi_;~D3T&8EtOV^zoILKVk*H(J0)C+Ryrshm4Pm0s4`3$rHod_D-)C)C08j_ zCMl)LWW}jWRc0s^%4}thQl%_V+=@_^Da)1B${J;ZvQgQlc$A&WUZqaCM|nVbNI9uI zp`1~kQ(jSCQ{GnIRW2!?C|@a8l%JFv%J0e@)knQcZKe9Fh8n1bs1a&MHC9bhU7gkL zYHxL*Iz%0%j#ekC`D%e$tUAgm+DvQ74--8NA;?DLzA@@ zS}V<8vue6#XaQQFOAFCLwf0)17OQpA;!Op$Eo}ee{NqVxLqNnOGSmky3iNv+w~p#PJNfYTd&b;^*#Dty-we!-=p8BAJHFl=@08q>QCuU>!u>09ssZ|W{gVE%ep&xS|5X24|3?2_|3SZ@|7x@_?lxK)t&G-&uiA`fgfY?>Z)6)KMyWB`m|~P0GmT1PzTq}j8mo-e z#%9A+ZEP`i8#P9)@sM%Cc-VNvIB7g;oH8CW9ygvb&KNHkFB<2Kw~V)q3&wlK72`YO zd*cV=N8_sTlX1}~cj z`ryl-(`gpG^H7?=AmhYTWU~%?IEevLDx35DYhrQ`9^$pRbqTvygej7Jtb}7gsOz}#N_H1u5FQvW^FtFU}$y0 z_5A@w&gik-nr2nSr+7oBq{J@F9r+>jcG#NJ<8G%HbK&zv_3c|xO&3^2y}ZTNCB#^l37@Zo&)4&E)d#!a1;eQl9SF8W{BdqDY)(5=wuA|mt#8{6DpKHSBdOiJ(`qkBZ z8=G67)U?DDZ$rd+o8zD9eboA-JM)0`l=U&|uKvV z!sm{Ve1z0ONPgbBVpj(Zg3)L#UcB&!2VHPld!*e@le)EYCEI|QHgw)h4Hz&Nu z$?Pd9vCY<hb`@-{nl&N>s#-$^zgrF{nbnUi;$!`>u*Amz2woiz2tX< zbl2bH3f3!16C#Dw+{-OBbLaf;GP`+Rv{)4XW=acpW}VVpNO#vMcMGZIR)0%)q~hz& zt5f`h)T)utYQ3VUUP9k`!q)Db>ehdD@l-UiDE@7gKrc&xko@bEAR$>BSVFxlVM08wUP(|Al_VuuNl{XjG$mc> zEF?onZG;pcq#z-+6H=&3A!P|EM@YFsn)G-3D1++@Et#HhA4`vBBO0u>9;eqD+h7HH z9`RaP4OXz{j@Qa>utGfjBukHG#SK=NXQS6DYp^0b?|H5A1}oANCR=(mt8B2MJr1um zufgi*dDv?$YOw5{+g@u)gB9!PdzYn0vy}~2oM(gATGwDDc;3Cs(!+nVvc+4HY9S@n zDO-h<>@7)%vcsLX_wQC!b}O}BP>qmM>y$l0O7nu+Df_*3J0PU=3w)~_R1SIjl_K9g z%Kge=WrXKgGfSA-SxA{eYWo(=m7~fr zc<%6m%E|kbM=1a2Km+Pg<&>v;b4x(e$CSrCxy>yZuBVjKjjEmzQrGh|=Wf1K&U$q{ zE2M7!*7c%tSQ+c4nUK2I%L@FbtXGAU;dfW#qTf)?H)iH7A@%%={-T%u9pRFC{hR*- z<*<@f&)>VAKlq>gm%aQg8~Hy|zG#&HrI7mm#s7_$|63vT`#1d$%3&qHp1!}3GF?In z`6vH%FTd2te^c=u0spP+Z$cXQ7yln#{@X$t^e=wZq8wI=>-h)Q%Mbepy()X@Tm4P1 zHg~Ca|E*tbDWswQ&>kn(>dB(jk#MRle9z5M!()9A}o zyEX>uCZzFy1?uSy)JsSc{vD{Fa#)#HA82BIW;*^e^Mk#aZ{0{gOda9QtW#SI7gF}$ 
zdp0no5inLA|9AXoMflvk(b2`S&ZlU?d0udpH^75rP+WaY53vR+tWy|B1{ z3Y+Q`)}k@)bhYB|xav$H75%mD*zcL*|6}0MmD#)*eg{CskBaA zBBaTlM_XCK0#>SP+?o5-RqARXl?lnQPhG376OvO%Q$06YSqj6psN3C{`_--LHq|4f zav@C@(v1D;4s~aXwL+>8(i+d))|QHXb?%G<>OOV9dO*ELNHc{rUr39Dw75~hz3LG! z(|zjw>R};O3Tc**X75)YP>-s|gfvG;bA>d|6Yp!OZGTFA^8bQ9r9Q2m7E+av76@sf z=Z3E(q{nmWOaB-2W%U*HoRC~Xatld>ce_gc*CayPHzDM&E%2t0Mm{>m&r;ST^KJD5 zcjiI$KJ}vdj(S*qPkp~$;}Rh)71C@WEfdo6gX)JCRlTHsY*8($kXE>Jg|t#gt39`@ zmIqv4(_H;V{nopyDyK{>4)1ZYxOmE|H!_8^s$pw=r+(kCi;DND-%+0D=Qp~%s9;`5 zL1jpM=lIl+;Y0ksZK&}X^_n}Q4qtcE#`N7(@3=D$slTYds=q1e>MivT^|p}K3u&W} zHVdiKKp|}r(l#M&KcxPtp;@#hT2swOlZ3QWNPC2IKuGrr>9CNF3F#p(Z~d)JYu4}! zY{TF8E3KtxhHB~6%(ua@$@8RY8QDT@u&UiuU%t@iZmms&z12;%-{+y#!$S?vUd>`{ z72GiHag*oQv--^2ojt#Khr=6&cX)?;)en!iD!g|tgZyZ32!Ek;N+ zLaOz+bxVhUBrVkoNY+w>v{y)V`?NGIT}b;{g?)SW@TLJ^dXyd(rv0jEF^$ZiVi9&inNJkqO^0i6-(om!o z3+X{29rrwDSONpew5k6BOw-DRbV5iEd#)OmwxcTD8TV_mwAtDmZLT&?o3B-A3$%sW zA|ag=(kUT5E~F=g^t6yp3+arI&I;){A-&L8Bhiq(M;mLjtiDFdfA1oXH9FRMa@$(s zJzuu5_$wP4rajTZ6fFso#FZNQkV-1dH8yzR=C06HMkgMN+r$rm>WCP;) zM#SUZ*Mrn*@sYi{*=MS%J=K7Cu@P~mI{u6Pf%Wf$+OyiL?#z3&=d|aw7ql0(m$a9) zSG02;XMiQJ)$7_D#Nex)BSO3I=+*$sa7%|z+k)c;JyMVI zyc%q28`VMYsM~bA9;3(Vo%A>%eJ`Y6ge(bJ7IL7FLxpS?a=%Agx3kEWlnlL>JM)m9 zsdv%4>fQA2dJny)kbV%-k3zaCq@RRzO-R>;bmI^a#OQsA;1zwqqXR=MO)asb-5GoT zTHrW$M&or?pQsmlX|wcfJx9;g^YnbZz*7}!3A(F9FV!dOQy$$DY6-E7oJn)bP`xsH z=%|qe(@KkbmCu@1DP&9i*{RRg=S=+XeP&c~oheVu1xxTRgkjrt~iGco!W zeJe4#$NSK0Eq;{cr=`d$~TNA9ZKc>Boeu{gu0iyaOkMtT*<>N&PW*+CKeJ{gjXm zA)EVJtks_oP0kCs&B%dMibht>8&Ucf`I*1T_5J#@UgIwwU4KD;#htlde^Gx)e_6;u zLJk&kyZ!n(B8d@lh^KRVOF^f%_4nMF2lNa2Mg1N9T_J}FIb6sQLT-ORf1eorL;WKm zN7iScgOEFVUT$wGi~C&v(w$bPe<9?kI{hmlN9XzZ4(Q!yY=LuD@ys4GM^_XVH_Eu8 zf9HwrXbE?<)UWE-8s}XXvaJzyQ~$-Cx%VIHe>a+V+vt}5hkjeXqyK4OScDuSx@7lr_~w3LQbDBWm;u%MM=S=#?=~O zMuZm>F67R2MtdP=j2%@ztKy%C4u;K(=qTh&SDj%Oa+kjkli`2mqoomVBsMBb5^}e{ zT)L5Jq&Mp5EadKut}aGTcjf^j+^`$njUGboEaWai?jhu!2Ml}tpT6}YUF!et+Y3ow?7j8$;^Ry}gfKxkBzTcFNR((nf4cW3(~0QSLY) 
z_idz^Xk`6QWsO{;(4D#8$TRW-i~=F|7xDlh58O|TQDhX?*K3fFhj`X@uq+C87*qf5 z7-O1{2RHK0FeL{Za3PNp^4Pzow=@HI^GIggipXBR#bpEs+6hjSc?-Y&13rd9;wncs}lE2@Tk0?D!X8r?E@Os5AB%`;7y}J;p)fka4e&Cki=B$k{^95pu4O^Msr)X*})ifv1E#sm?eprkH@UucLt}~t&a>?i+1yiOqF8XETHFw&6;}zqa@v4wZg*;ivQ}!FL6KT9D=g1;Ax{%>xsay| zd4`ZH4jG>tUl?B+9gU90H?1xTd8Uvnh3pb?X8rdP(ex$H${5QL{~N~7#!Z?VzZkz7 zzZt(9w>*7fEn`|N5b|sx&l2(?&w*G=MyiQBZ@+0Vo0v_7JV(g$ggpN|%}vRajgAus z6ctaaoKiBSxI)Ntg`8P`Hk-}O7M{+XEbW@^Gw=4Kb+SbCs)vWyV>=v@9FXm$JOG5Q*dtKW9{W<=^>b8CzJe}e#&6>x%^Xkk5 zAuscEkGF)TrkI)T%zMmKGtEplJDV9oULoX_LS7~0)k0o#k3}`Rn%yj_*~6VH0j`R+U5b_3(J;4$_c$_)m?=56b6!ND3 zZXq+row3i%HS>hLS;$-e>V=kOp;`RDy>FJ7r9!S2@)pmo1WRau(=7iNV7fU&$lHYM z@w}5@Y3G`4&iS8X!JKa{XwH@TS2U>jytu8^UPoUMW(7GM8wnOV!&(ai2 zxN8iw9tW)_K-5tq4jKNJr`P6LF+}(T0rZi(0T>5UJb3+LFpu{3HT<1Z$I!Y1>eQsyBmB@fbYZLdj@>Zg6}!-eGOc`Z-DQ4@O>M6KLX#6!S_?} z{Q`Wy2EQ=y8w7r(;I|30y$^mLf!}5D`waZP#D(8=@cR?|?*jkk;D0yxw*r41{6oM$4E!s=zY6@9gZ~Eb zuLJ*k!T&h;KMww9!T%-je+~TK2LDgM|1pyT zEhyiC@*60(K)DUdpP)7awFRhIpt>f5S_Y~U)M=p30d*dzPlEaqsIP!}5!81*v7*;UafRPMFS1`JR(F=?|V2lMLAB-|E zW`i*ojQLU}7+-+#6&OE&aTSbfVB7%X78tj|_!CSEn9W^a`hl4YW*0Cg zfLRD;5tyZ5P62Z^m~+8g2IdMdSAn?(%ynRH0CN+VwP5ZAb03%oz&r@%yGLfhrgwhr3f4{eV?+i#)mb!huD1f)PfHwfqf z0d5Fb1+IWK5O5FzjzPe22zVF*PC~$G2si@)A3(rI5b!Ysd<_BLL%>Z4)FChk0=q!q zCKR+u0t>o+{6XJcR_G-2)-MF{UF#1!72m?L2x7lM?oeAyQLc2g{7Yyw}pj{ZWi-C5Xpj|w)ON4gG&@L6)l|sAC(C!Shy9yy8 z5RwTY10ZA&gbam{;Se$kLdHPI6bPw+kV*)d2dWBBm@k9{LRc__*&r+i!a6}%JcK1eSTcmALRfDI>kDE1T@W@9!UjXwPzW0i zVY4A@GlV?`VIM=-pAa4a;V}^23BtQWcuxrL4dHzuyg!5wgz&)-J`}=-L-+&;&w}td z5blQX#SnfUgg*q~4@3By5dJ=dyFP^QUmyaAXaW&-h)9NrREU@Z5pIZB3=#Vw;s`_> zg@~6R;ygsW4G|Y0;!}wD93sAeh%X`HIz;>o?Sb}9puG>Ym!W+#Xx{?bYtY_+_HDq` zJ^VM0SP9kr0^$kvR}q2$4k) zSptzWA#xT(&WFea5V;5Wmmu;rhnElXo&6z z(Wwxf4$&D9-36k%L39s@?gi14A$lo9ABN~RA^I2SU_gfu=nw`SnukLNJ9LQkJlWmy zimMIS0>Bmowsv3(16zBrMT5--wpg&mgDnYcsbK32wk}}n4z^xk>kGC4U>gj!VPG2x zwlQEE54J3@<$|pMY(-!z1zQ=|rh;uc*k*#uHXCg7z_tKvF0d^I+cL1N1lt<0tq0pC zux$aG2W&gRRs*)ZVA~J2gJ8Q4Y)8O$3~Ud9?Gdn@0^1W{dm3zK!1gTIUI5$6V0#s8 
zZ-DJBT-Yvx?LDx42)2*G_9@uD0NdAKy8^Z!!1fc^Zh-9B6U+h7OmO~EdMy*b!h zg54MFR^Wf12m2(jmw<@$eQLsM__NT!94A{?t{dusz z4)!;}ejV(8fc*}{=n&HmVnQJ%17dnZOkap`Ld-0PnFFqvtq@ZSF?%8ANr-t4VqSol z&miVIi1`6xb%<>Tv7r##6JiHJ>=1}`Kx`$%&W6}Jh`k?Tk3j52i2WF1KY>mW&?yEw zb%IV=(5VPIl|ZLm(CGj!Ivs>gFF~jC(CKaH^asRA5O){E*&r?n;!+^47~-Zu+;oWZ zK-^x4+Xrz^Lfmr@_X5Oy32{F{+)ohi2k~tnJ^VFDy%LBaw^SOy6zAfXl#4ne|wknkKN zyaowxK*Fz(Xo1A0kZ6O%BuGqw#F3Dg1&OX4NL&bs%OPOsiz_JGNgV3saGIPg0$9<<_BqZNK1yaR7lH$ zv?53=fwaYtwg%GHLE2GBd(;JKk3rfekoGO4eFy2jkZwYHTS)H=>AfJm52Vk8^aYT< z2-5dK`e8_a0McKD^ox-GE~Nhq>35(r&^aDDcZSZH(76ygJD~Ga=v)h(4?*YqptI|B z==?5pejhsDh71`pnn6Z9WORm%OvosKjB?1B0U2u{V=H8MAmeGscmXnAf{bgBaSJkT zLuNE&#zSTzWR8N&Y{<-o%$1P22{Nl8^9jg&7F?OnL*^C8yaAavp-UiiX%AhZpvwU0 zG7`FshA!36WjA!Gg)SdKmoK5q*U;4pT?3$N5OnPZUHe1VfzZ_nU1vepInZ@8blnAA zYoP0+(DfPU`XA`}4KBJ~gRVEAn+Dy2p<4)a>kr*VK(|rQZ8mgsK{o;24nVgDpxZI% z_6Br&54wE--Ts8`&7gY==spy>kAv?t-2Npyxs8`4jZ~9eVx&y%L~T2K4Fzy%s>P zWzcH{^m++;orhj;LvLT`Z9?z1&^s4;mqPC;(EB*_c0CTgpM>7OL!YM5M}j_mpwCd~ zGaUM?gFf4!&vxi@5&C=#eLjJ{;n3F(ePf|-3G^+8zB8cjebDzI==(7A{T}+>gucH* zzclFA1N!xXehZ=Ba_F}bT>V~ye(ylP_n?1U=pPRK+e80+=syMeJD~q#(EmTs|5+H& z0tP5BK!X8UFrWwql)!+8VZc)`;4}=l1p|FxpbP`M!NC47a3Bm^2m_bHz?CrYEDSt{ zi-E7fAb%Lt76t{vpwTcW2L|QApnWjtFbsMC27M2MZo;5nVQ?A@?g4{)!Qgo?crgrK z3WHyT!53lhyD%gKhID`-HW;!JhHQc%)iC6J81fkm`2vQhOU93 zXJF{dF!US@3xZ*hFf1B|mBX;PFl;^ydlrVh3d3H9;U)|Zh2h~aycC8{hv5}4{0t0# z8HS&O5v^c^4kJt$kp&})V1%m#Mx2BZr(why7}*>~T4AILBPYPfLKs;DBOiy6XJO=X zFzRj?rNSs3MvaA0c`&K~Ms0&pdtg)@jQR#fU4v0KV00fCJrqU{htVrx^d=Zx4WmB* z*XYk-^p`Lu493`CObm>vfH74tW+9Au6~|Q1W-y^QOc(+ahQWmOFu?;8cEE%WV8Z7x;Y*kp2NTm_Vg^iH0TVaE#LY1A zEtvQrOuPhHZ6PZPvN}LkN67Ntc(c+Vt21QvfvkRzH2|^(LDmq+8V*@mkd+Hr1&~z) zS*4Iw23b2H>m+2Ig{qW?V8M0o5tT!O*J;?e1vMxi`XOQ(JWL<}>pCRja$od1a zTz4S5DP&8K-3+q*Av+kdLm<07WJf`E2gr_v>^R6ygzQYn9uL_CkUa^qOCWm+WY34} zg^=xn?8T7146@&Z?Drx2E6Dx^vadq+b;$l1vVR3v_Mec`1ac(EX$Codkdq8K-5@6m za*80Q1ahW8juUd`K~5Fqtc0A^kh2zY)moJ&?Nta!*3;X~;bTxgSFA7m)iEd3PZHZpd#1`6lGIh5SIs4~F~@$Pb77 
zSjdlu{3OUvh5XKt-v#ogLjEeq-v;@v?U26<@@pV}FXZos{3DQm6!K3%{z=Gx4Dz3c z{1+krRmguG^52B~i;({=>F;f=nnF0tLgNU?dcbfr9Z+a0m(>go3A_;4~CG4+Sqm!OKwaDipj81@Az?`%v%^ z6gGjvW>6Rbg)var2?`USFc}JaLSb(x907%+pl}Qnj)TGpxG2nm!W<|pgTkp$I2{Tr zps*4OXG7s!C=^h*1PZr9;Vvkwg~B>0JOG7k!X)q5N|>}7Cfx&*j>4n|VbTvU=@*#v z8x(bhqFzwc2a1+L(K;yF07dsh(FrJe1d2{V(OD>Z4vLV%O<}ShOt!-0 zb}%^%Cbx&l(J>DWi4$6LnvTIQGGnD-bWw)Trd;U8t;P3&*UEpW|j#l9C1BU_*9UN`I5eSZU z;0OapdvHX9!v>C6aKwWn2^^{5=nRf7;OGvHUf}2pjsf5p431&o7zvIs;1~~%EO0q; z!BGH?B5;&~qYNBV!7&{iGr=(%9P_}j030rGEC$CiaI6H!8gQ%!$0l%W0fz@1JHb%{ zj=kX64~~Q2xDOmhz;O&54}s$maGV0i6S#0Z4URM5corNlfa7Iwyb6vt!0{G1E`sAd zaC``kkHPUNIKBYK*WkDUjvv7B6F6>w;}>xJ4vyR41e{I5DTA{)I9r0#7o1jbYTz`% z8Q=nEFgQcO83E2HaCQV|3^?P!nF!7laHfMZ6P(?^*%O?7z}X+1gTOfyoFl+F8l2<6 zIT4&W;LHc-Byg61a|$?};4BAc1vqDcb1pbtRp49%P65uP;9LRD)!O!1)X~&w}%LaJ~f2bKra(oae!L z0i5rG%lQE~FM;zDaDEQXufX{&IKKzyRd8Mh=S^__2F^dg`6oOC;^C`^42roIMK-+`&`!PK8&>K$B61EvMSwDvG93Z@N% zY2#tqM3`0u)0V=t!P+khEFQgYcTx=%urxPAj}Ac8JRGn56tKXGb&(470g%&GwNW*{V?MQ%y=DUybCklhl-|9 z(Gn_Lt)apO6-iK$0u`g6A{#1ly-uiD2^FiMq7EwVhl(Rm@gh{b2^DWa#VwfW12biq z84EL0VP-na90N0RVP-zeTmdsT!pzMu^8uK75@w!)nHRw|^JAF#3C#QxDw{!N3#jY> zl?hOp1eGJ9G7BnmpmGsZu7Ju_P`Mu}k3i*7sJsZ3A4BCQFsmudY6-Ji!>m}Cl?t=c zVb&y=<%C(&VAfKYwGL)&z{RYmVb%*U>m`_V1!moVSvO&}3A00Cb~wz=gxP&yc0ZUs z9cIsi*;O#R8fNc?*|jkH5t#in%zg%DzYnuNgV|reoK`SLhdCz9iHA9zVNNE@$$>c~ zE|@bJ=B$M|TVaj|=A42#XJF1*nDY_L`4Z-Q4Rd{9ZY!AU3v&};ZYIp_3UepI+(|IE z80IdAxf@{aCYbve%>56{eHP|^1#_>$+-oq;3iASBUJ%T44T5>2VcuAnHy`FLfqBbd z-aRnyD9n2h=DiB@F2cNbVSaO%Z-x0P%18cfouC^AExN2Vwq0F#iI~ zzXbCyLsfIAvO<*#Rb8Q~AGoRpKvfY`O@*p*sM-cqd!VWgs$Pbwx1j0*EC3cXhXr@T z0vjwyf(0qCARiV?fdvj&Pz?)q!-85^a26Jvg9Wd_f*Y{lHZ1rP7KXyYj1#u3D_C+B zmRy6SDX_E~EbReHSHaTFuyhM7Jr7GifTbV7vLIL%3Cp5kSure|2Fs><7k(6$JqpVn zgJrj1xeqLtVR=_r-Vc@!faL;~uZHDoVfk~g{54qq2CVRd6>VTe0IV1TD{^5)KCCzh zD~`d6MdCHA*{Lts|{Ek0;|Jd^%z*43#;>C^)6U_09GG_)!)GCYq0tTtg*wIWLT35 zYv#k6C9q~0ta;7_YhHsjZ@^kVSlb5H2Ef`8uy!J>&4#u2z}lm*_CZ+tE3C7?x~8x$ 
zAJ$EQbq-kf5UhIw);$I5C0O4Y*89Qw@vy!C)=z@<=3U!p0r2aUX0v02{x6jX%J~tFS2%Hf6%5 zuCQr6Z1TXS9kA&W*z_%I`VKb7!{*MgITJQ-2G{0Yu(<{{e+HYqfFGbb7OGRBIvuK~ zLG>J{o(I)up!#K~J_lQZU`r%yiH0rtuw@Etaln=nu;oeE@-%Gu9kw=wtrBb<2wO+N z)-ka49@u&mwmyi9ZLMIN4%~f8VU6Wu}3GA8zyPUAA9ClT}u6?lURoL|h z?6$$~EZ98fN>&HGUE71VqKHCLhLIxcE%Ld|bb3)D7+ zS{Z7aL#-dw8c>@Fwd0|-5NaJzI~8iDL+wncbwll9sNDdyo1u0q)NY5`T~J#KwRKQ? z6l#w{?ZZ%e5^7ID?c-4UB-DQDg4#QKgXba0c4tZvzA4jmFYgx*3gN$B7&1Og;LE(wI*LkXRG<=#8C zalr)}cPzPg+|?{gvbr4J@%-NX=bYU+d(Ja+W@l&j*+Q}($s&^dNuEaXOp@o2{2j>) zNM20xGLl!2yqe^7ByS{nGs)XX-bwP0B<~@4@Kj*(A(8_~K1T8hl24KRJIUurK2P!m zk}r{bmE;>F-zNDU$qz{Wm*hd9Uyw|a97u8q$>AhNksM2M0?ElFr;?mbau&%sB)=n( z{GQ}ulFLZ0Ai0v{8j|ZtZX&srP@)a9hE zB6TgP8%W(m>Q+*Bkh+`HpGnOoSEkb0EV^*2(_l1h;J7pZ?AR9+$VI;pov zy-VtSQXi4}gw*Gxz9LmhYA~r`q(+h&Lux#!Nu;Kb`i9g@QnN|TBej6kB2r69{XnXm z)M`@eNNpswh19l#Kz5SaL#m3@UQ%_W_LFKR)kf+7r6@g=(!(h|lG0yN`fEy$qx3{d zPocC6rD>G*q_j7sS(N5dnonr~rNxx~hSJk1J&V$FDLs!w>4lVDLh0`*y^_*vD7~K2 zKTvuLrMFXh7o~rq^kGT|Q2Gv~A5;1%r4uNfPU%ca*HZc;rP~>JBm<9Q;0X*YV&It! 
zJez?xGw_cL{4)cemoV^E2ENX~2@IUhz?lr(!oWQYtYpv;3_6xU$1~_`23^FUOBnPz zgWhM*hYXs-pv4SY%HTs7{0j#Eiopd8KApj5GWbsnewe`n82lcCKV|Ue2?j4=@G1tc zVetPL@^gk9$&hr0^kGO}hFrmr8yRvFL!M&D^9=bHL%w3jFouj^$ZCdcVaSgRJ&d78 zGxQjS7BloLhMvRF`xyEtL;u3iw;6hH|99vo4E>s+-!gO#L$@=unxT6cb{NBsX4o+d z>%*}A3_F!!cQWjLhCRrz_Zapm!#-!&REEuA*gS?+F>F7>nizg8!#gv)E5pxV`1uUK zki_smG5leM4`BG44F8bf|7G}OhRyW=sWRk7Vp| zj6H#|g^WFev1c*%5yn2r*ryr$1!IRWb{J#VGj=;;cQWo5j5~pGCo%4H#+}Ew3mEqZ zd@&9G~ zCybxR_@#_r&iG~~V8S6x$YMeP6N;E{6BF)c!k?J%ZzjCOgm;**oC#~0uwKH%PE72@ z#NJH2l8Jv{;>}EahKVmS@g*h>W#Tv{PGI6PCaz)PIwm$Sv4e^KV^Ri_`ZB42N!Kvx z7AD=sq!*d=CX?P~(s(9)!=xEZ+RCJg1e2Yyvmd{Oxenm zZA?w%Gxax2J&maYnEDh`|HjlQOr6ctxlB8ZX-6~d7^eN6Y1cFDMy7qjv{I%GVp+n<Ve}Dyt zv)~98T*`uLS#UiIK4QUFEJ(4So(1hJIKcN8^8J;3e>LBK!1rJ9{a1Y7#P>h3Fi0%C zn1xrf@LCqW$HGrp_&E#9Sh#_On^=^=qP{FDV9`HW^csuaV9`7lEoISi7N5-G9xU#~ z;yYM;AB!Jg@n{x*&ElynIg%yEvE&4nJjRm0OIY$OOIERDGfTFzG@Ye=SlXARPqFlQ zmi~*S-?4NVOMhV5@ht1gvNV?6&a!)1c0bF8uxu>L#f42#SbU*Ll1uF#Sahj!(aK~DSnvC4@>xA87qFxic?t8nH4v(;!alF z&5D1q;&oQM$%=Pa@gG)v#)>akF^v@qSh0{5i&(Lk6>C|sp0dP+lwCsE?IvTG>2 zp0Ynsb_->PO${wKXVaooGvcFLFSIVBI>>0}bLD@ekdy%r2DSM5wHz|9E zvj0%_A!Q#^_8DbgQkEi7Hi)vJl#QTlG-cx`n@HK$lue^-24&w;HkY#blr5xe31!PE zE2C@`Wos$hK-p%>ex&?z%CDmQTFP&r{3gn8rTh-c@233El;21BgA&Rgq5M(GAE*3D z%Kt|Bvy>+&{}<)|ru-GkU#I*n%HO5@eab(g{1eJQr~E6*ODP{r`7p{yQa*nPty`4-BzQNEM%J(M?6-b{HL8 z_Bz&n%G#l<9nRWOtR2JJX4d_Tb%(R=NY?#=btkayB-Y)+y8Br70P7xR-2m22X5C!Y zeaE`*S+|Jw$FaUU>wB`kH|sN5-;ec$tbdaAFCswjh&W1DDa0weOW5X3}xQY!ovEddryvv3!*zgq_O4%@o4WrpGmJLm8Jd}-x zvGE8t9>vDQ@oYSijhD0WW;Wi+#yi+}7aJd7<3ns5!^Y`soXN)7Y@Ex+C2U;ArbF3u z44aN)(}`?4nN2;|)Qe5`vFQmmJ;|oOvFRB$y~w7Q*ffJpi%4u*!lvbHTEV9EY}&}? 
z3^o_BxrEK9viWp2pU374*!(h^-)HlOZ2p+dpR#!%n+LP`VBZQhSFw37oAG3Y}vq;J#49DOAT9U+0w$6 zHntYA^<1|8j;$B4^&+-j#nx-s`Z8PJXX}S-{g|zvvUMO^2eWkxTdUc+H^J6Awl=V} zgRTGL$D8=^9)7%!A0On$hxzfZ{P+~xe#W+A*>*hJPGZ|BZ0pIkbhh2fw)@%kAln{c z+yAlcX}0~HZFAVRoNX)EwvuhD*|vpki67bCh3(mF&t-c)+xxNoG`63?_E*^c0oy-f z`zLJwjO~NiK7{S9>^PhqN3i1;?D!QsPGZL??6`>?_pswWc09k@Wd#;z;abrrjAV%II~dXHUSvMb51f$SR0t}*O7cpYe01-qKq)xxfJb{$~%;p{$w z-M?q|P3*pf-M6#*PIlkV?g!cZCA&wmdo;VhX7@C9&tUhr?4HZ+CG1|#?lN|-V)t5h zH?XHOd$QScHG2;7KYQ+F&wcE9ls(U}=V03_?0KC%Z?WfH_Po!Y&)M@8drH|em_5VT zGmkx0>}jQT z7L}8zoJ(aDmHVkYK-FPX9Y@s(RCS}O2UY1*Wm1(xRS{MFsX9$U)tOYCL)FDpT}{>P zRNYC{{Zu_f)c~pOHDHqiQf!6R4U*)l8~pQ#FsO1yn7f zs+_9TRIQ_GBUM|dI+*LKI;z^J{yEi0QhglNCsKV1)m^AgqdJS~T&nY_E}*)Y>a(c6 zlL;jvit6X7eu3(jsD72|H>m!A>My81NH|tcqWWv9zomLE z)$^%dNc9q`S5du|>J3zHrus*!E2(av1~rFJ^9yQ@rsh~`PN3#wYI;zUPE95?In?x_ zrav|3Q1g3guAt^d5;Zqda~m~xQu9Y@9-?LdHIGsA1T{}l^E@@LQu84-|D`5L%|L2~ zP&1sGQPfPPW-2w)shLI19BMYP_h;-qj=g8F_k8wV&EDJDdnbGEm$3IC_6}h0W9)r` zz0a}tdG@}*-j~?>DtiaAcQJd{Q5)1AMeXs__Mo;GwRzO`rM8gT5^7JS_P5lYPwhq2 zUP|rd)ZRet9n?NV?IYAanV|M>)ILk?LH%FUzE161)V@pY`_z6!?RaVr2H(xTU$gHN z_GPfIAN$T?-#P5Nn0=SA?+W%^&A#i{cN_ceWZxgzcMtpSXWt9#8_vEd>^n#>@2g;6 zBl~`$?w8aZP2I`Vb*8Qxbv>v{r>+lm{irLVu0M6BQFk46f2Zyh>c&zxmAbjqEv0Tf zbsMSMN!=dms;JvbT^)68)EyvEkNQKYKb-nsQ{R>POzMlN?@#?X)c=n93#h-C`pc-l zj`|y^znS{msK1l?`>B77`oB~EGWD-g{~q-pQ2$@*Kc)T)>W5H2ocdAJALRe)Cs03~ z`i0b&Q@@q^ZPZs$znA(t>i1LMOamGYrQvWIj-=t2G@M978Vy-Al+f@S8qTHRJQ^;f z;Sw5tPs8;z{DFpBXtoQU z91Rm`m_fsQ8h)T*6AfEv*h51V4SQ**qhUV{2iTAOhqC{0_8-aq zH14CZmBtPlf1>FSnhvAsXqr0HltohxO+_^Ir|C4B&ZOxanl7g4GMcWS>1qj0*U@wv zP500=fTm|?dXA=-XnK{VH)wjBruS(2l%_9eO42ltrXe(qrD-Znb7)#l(+Zl_)3k}E ztu$??X%|hkG}Y78NK*?T1L^ZPV^MDr&!e@^pYn#a&Q<)Hm{G%uj} z2b#-iUQP2lnm5wCljc1%SJAwe<~o|&XgQpgU(?c+mNZ(jXvw7|pOykzifK8EmUC%2 zkCqE*xrCN$Xt{%y2WWYcmZwRy{F9a!X?dBJ*Jyc@mJez9n3m6I`I43tEhA`|LdzUl zmeaC=mi4r3qGc;B+iBTFOD!$+v^3JvLQ6ZXKcn?nTD#JkLu($bCA1!V4xsf6TF<8S zx3peL>*cgwMeDV+-azXev_44d6SV%5))#1fgVwiceUH`;X#Fp(Nm>WeI)v8Yw2q>6 
zGOcrHT}X+s|nG6>TTe)|0lrv=z{HI&Ejs zb}nt_(RLwiSJHM3ZP(NG2ik6-?N78lO4~EEy+Ye-wEc&+4{7_Dw$Et$lD32Mf7=M! zM$F<+QD$Z8L2@(zb)P-LzHG)<9bmZLPF*(DoDUN6>yO?VV}QqCJQ9 zBHH`Yej4p((tZx@7t?+j?N^X!znb>zXupm2due}^_UC9%(EbYTuhae(?eEh5KJA~= z{uS+|v=63z813U}pGEsZ+E>%Qmi8ZM-$DCs+AC?Vp}mRrR@ysg|A~%6By{|Oj!txB z(veL^Asr=joJz+Tbev7cMRZ(B$K`ZfMaQ*t+)Bqibo?J3&(iS^I$oyZH9Fp;;~hHw zL&s-yd`U-&jzM$`rDGf&GwAp}LB}dO*3hw)j_q{pqN9S2YC0O}XrZH>j{k8WIB*mP zj^{uR4)o$c9tZkzppXM495|H&zvaOB9Jq)BmvZ284&1giwIOQkN=?Ky3DADP6qSKY4)77HWb)wS^qSGCs(_Ny|2clC-bQ&l+ z4H2D&iB6M6rzxV-H=@%F(P_Tu^u6eOgy?*t=zOy1+*x$)DmrI~&RHVS`A*UKVbOVj z==_-I{J7}+tmym?(RrfiJX>_0D>}~?oew%&Av%|fF2538I*Tq{MVIcPOHa`yS9Iwk zx(pCqo)KN16J6dCUEUR4-WOdy5?wwKT|O6Gz7kzZC8En<(dBE=Wt!+RLv;C8beSu< zd?&hmFS;xeU6zV2dqkIJ(Y1@{daCGpuju-U=sHkz-6^{MB)T0Tx*a9D{YrE@Ms({Y zx^)-b3PiVJ(d{>)+v%d)S)$vyqT6{1(d|Of?Gn-LX3_0d(d~B8?Jm*nPomqsqT2(a z+e4z;0MYGb(d`q_ZLsLJT6C)rX{U&^Jdt*~NIOrYT_w`45oypE7InRwB;ggqe$B<(rQIoy+~^mX)PkHU8Ma_bPv(}XQKPhMfX!g_b#G)nn-l- zDZ2L--LpjZT+uyWbT1IyuNK`O7u{bL-G_+o^F{Y6(c@^*qmSruy6ADH=y93oafRq{ zwdir3=y9XyakJ=go9J<;=rKU_cvSRwT=aNS^mtnI_`B%woaphqMD!RedVDW>Y!E$u z5Z-o^wUd`J(4S(Q}FDxm@%t6Fpanp6vRme zBIB1L<5wc%B$087$mk+6(nLm<$jA{HeMCk-k#UyDI7eihFETC?8JCKTD@DdNBIA0I z@duG{i^#Z5WZWY%?iU#kh>X98jK7MECq>2!BIDm8;|-DVw#ax-A~HS@8J~%aFGWUD zWDFD;vqZ*fkc$jlL$eMDwIk@-83`GClL zT4cU0GCvcUUx>`4$Q&p#hltGKB6GUPT$m7(WZfXLZW39y zimW?C*4-lO&mt>vpU8SrWPK{KMv1I>B5S+I{<+BhjmW-IWZx>XZx`8jiR?d#?0ZG_ z10wqwk^PFueobV*DYD-Y+5ZvQABya+MD{3=Jw{}Y7ul0U_7su*jmVxOvX_hO72>1$ z@5PCU-6DIx$ZitZts=WaWd9^`4iP!O7CFa?oD)RO$s(t-$mu3>az#!bk<(w~oF;P4 z6glUJoZpF@%S6r!$@avvADPm0{XiQH!;A~zv&-w?TPiQKnE?mHrPh{&BNawm)2 zsUml}$ekr}=ZM_JB6q3CT`6+ch}`uecazAi6uH$Rw@&2l7rD(Mw?pLqB=Qasc_)gz zG?AAl^72Jqk;vB5$e4`$6QDi@Y5oZ@0*+6nWJL{p=Na zbs}%S$ZHmPZKBVwM4wF2r$qEQSM<3-^m$tJc|-L1Nc0&f`V1C*hKW8SMV~RE&v?;i zz38)B^r;Yiszsk#(WhSYX%v0hME=i3{*fa87?FRx$UjLW@;izA9wI+o|Jx z$bVSmzbNwG6!~w9{P#ru2O>Wu@&}6i2_k>8$e$|mr;GeqB7cs^|4!t8FY?!j{PiM# zlgQsH^0$lpT_V3iNMc<{O?^e;b 
zL-hSg^gBfKJ52ODLi9UI^gB!RyHNDISoFJ6^t(p%yI%DBgXnjc==YH5H$e1zO!WJ^ z==ZVc_l4*;L-d=U5d9X4eoI8ZRifWo(Qkw3w^Q`17yb5&e$Ap^n{aJ?wpBnr2R!tEkaxKk7zB8q-3ijEaUCy1hx zMNwx_)KwG}h@!Ja(Yd1NJW+I^D7r)x{azGZFN$syMYoEgJ4DglqG*69dQ=qsRTMof zik=Zg&x@iLMA1v4=zUT2xhNVgibhIA(Kt~wQ51bGil&RAS)yo;C|V|p)`+4VqG*>W z+AWIqh@wVO)GUhHM9~3JOcWm~ihn7Jj~2zpi{g_+aVJrnDT=d2alR-n5XHr!_%u;` zrYJr~6kn1M#n*`9J4EqaqWB(Be7`7uNEAORiXRunPm1DKMDe?#I3NfHO7@BVM~eQx5dD88`X3|uA20fM75&ph|DNK?{=G&2e9`|8qW^88|4X9(+oJy@ m(SMrgzg6_F7yX+>{{#Q;f1IBk^8e&1YyY48|4a1$>Hh=oT{Ut5 delta 30408 zcmZsg2Ut``_lMtA?zKgPy;UjF1a%kbMZw;C!A?htvMVUXhPXTS-cZ*9C~7n&#uSqn zzf==rs_DJ=7-OnQOk&FaS@;b3^8ED?_RKl+&TnSUnVoz6>u&kEyXEbA+sDkuJ*9q@ zNS{0WdRGV6AnQ2mcap&%?z7%(J!U;_J#D?mdaw09>togn)~Br(tuI<%wZ36}$NIkYQ|o8eFRkBNzq9^i zy>9(oL6H<$X{U5nd=;x=D5la?>8^w*5lSzmw_~|6azJTP4lBo$Q_5-OjPjuJ zm~ugRPI*CjO?gZCK>1j?q+C&cP<~RbD}Sky+Ftclt*WU8siA5Q)uzU%t|T>09jp#l z$Ep+5sp>Q}Q_WWk)MC}CmaEn3Qq`rdQP-;L)NSf^b%(lBty3G+M)jb2NIkCJrQWTc zR_{^oRqs>JsOQwj)W_BH>htOg>Wk_t>L==_>SyZb>KE!I^@{qf`knf{`jgg9>!kT= zR!z|i&C~*2T96j1g=vvmloq4KYH?b;mY}6+=~{nnfHqJYsg2S`Yh$!2T81`N%hIy7 z94%KX(dKINw1rxQR;jtQ)!I63leSgcsqNPGY4zG6?TB_)5q%*^ojZueVSgP&(-JYrMg3R>htw7y! 
zR)0>vs6Vg2pueWSuD`9nqkp7-reD*4(SOx{)358l>woBf>VFxMAsaUtoejlMjc!JO z5om-NJ&i;o$w)Q^7z2&r#%LqMm}*QjrW;vCo{?{q81s#4W0A4ga2akx7#oa@#wNpc z&^Tl?8HbG{#!=&D~W|$dk#+mVEf|+P0nQ3OaIm8@l zjy5Nl)6E&?Of%2SHw${3#ir9-Xf8FEnKkArbG5m~++upn2J?t{%sgS9G;cTWGVe40 zV?JDWu=6{9B|<6{Qn`?}2x+U3?mNBFcecgnXCWD48Os$<+laLTwclfKX zeRHlo^XdjE*hDWUrEl%bn$(oUy3(Hq zsRgfHh<5zZEG;25&Z{{wsqW#Q2gzB=kr5A0uJgH8FBg1ddtk@T)5orLY@_E~E3F>Y zW_C?{Y_dHhH94hrW=(Q@V%?L!evoWU7e22EpVx(tpYVAjeDHnKKVQ@Rjv>wL_`crt zh;J5E`wAWCNGEjb5bMY~@oQIQlyx-G*0I9pE#dQabN_eh{`##)+sW4Hbszp3!nFd&-={*AJk=Bk7}D|E%E{e7F$b*w$2ki9|@n2oBf}J556zw+QmHsoS8Lo zao&~bo19oXt0pcfKG7cXPZ8zTDlemRwRI8ET}}$0&xFtCb-!PaYFlev*(`6Bbv4n} zwZi9;@cFWtez|VS?@?_xTX%Tz)K39a#*UkQK!Ux}1@KJQ&6H_y5l2a4x zA#o}3NzEG^m*m}pf2_d)>mlzPeXUK_!!6@~Cw#uIyZ(Dv+Y{DPUcT!N>zzbf?-o8+ zh0l-8{GYu1S6_Ns9pAjXsR_y6JxxgZ-^`iS*W>p9``myjeObrO=lcVF_zr@(riLU%Xe^NX98>3~#BiS? zt;OMOf5F`~*H6?nDZzW%S|aN1{ByMOa!b(ftwFEX-SpRJ<*k@1t9$s5DV~_N zmh^zur1;SKu{*uV`jL=mvVI~Y%Wi*5g!zR#x5;`*NNrx_G99>Ly=?u;dc}Hv_jpTC zKwBaCc;#k?556yJUUHxITVAFVhxL2w54&%(beE1;uUdcHeZn%>|C;qzFZ>rF$xYVZ zgw)Op@A;<}{+E#2{|#3x)+0U@pI=kDgHw8Z3QaUD~jd?`ZWW)xU=hg|LX6lXk$_Q1C$^yB~VD#CM8%% zN(&{-O9>Z}+DhqRy`n^UDS9(Sb7$8*`o|>i5>6Z+t)#otPbqzr7$sJTQ{t5bB~eLI zl9d!CRq3nrQ__TF3aOir0)-STq!1y62`NHIkwWSzq~1cZ3#pHgVucic%5$-;rP}~y zjF1wAlp>@7LK-5Z5keXxqzOWrDx?`gnkl3#A>{}uS4f3_r;jqed84JSo^T(_;Pz8m ztN@SGYt3k}f;{(nt*jQSyXQ}@mEU58dPYi?!R?D%tZ>hEujOd5dU)RQS_@jNC{LJd z8Qi|Q#p>m8c&%kEmd$gI*K)O3(VjoN*6J23#xtUwWpMiqEmoXoo7dXfVkLOqY-btl zU#IN$ZpkhoB{eB~gp};vk`QITJNMAvsj4(62faa!LP~8?4hgBRH>kUE)Vpst3#s30 zTvm=Nw|MiF!Y&7u+mw^a6we*)En#Y!kopVBoA$~b%ALwxmWj&Up3mA_dc~*v$<5iT z+@su^IU!?oSz*zVN%>WC4=eXk{?CS%sQZ<(oYR`U zdW*OdpDX9Rx}FfypnvOnS~;oAaMNB$gPUaq{ZrQSLQ3~**E;E!lvi3;=2amLy@7wj zi+@wNq+$Q&zpI>7vYPqBoB7@U$^Xd9zp0h~sq%TN{4azw@&^C1m;aTJM*SQAt#VSy zZ^n-nQh%3_LjTGC(aV=w`PUTh5%Axg{Y^+?Z}9){^8XalxPS50Hp)q*xS2n`S$_CG z@M=3RzVqLBwWCYD>2LjNXCY1eFJJW@3aY=5CjFbQDJK<2Gk7igA*BmxieH7WLy-PMp86EgY~Hajwgv~$s%7;2aro;@LBe05cR 
zfwRcl?T8wtMo>O|LWb9BRUD=E`g=jt-a?voLvXZLa33K}|F>Z8f-LyQf<(2#QdC#p zzo*|^IIX_^>cG~41_^1_je&-G2O1`%+5aABlyXv8);v&V^UB!%x$@(^EAQKipQKK4 zr#Gn`CkrY2@0tz#$M`eUS$~iJFKO9ouD5TFkn)<;JR#+KE7_$MdW97Uso>wj<|!wY z4b8&lGz*LSr?4`wu#T<57OIthkE>P*sp!VM7klwbgjD=*ym#JP-I3mkY?*jMD@~}Y zyi!*RX>OCcT1fLeXF6NL0ye0d-RXzbjp`;LIfUdqtZq@a3TeKO$~-@IwiJZzR`#NEJua{px{^n}t*$kg9}KBP5rQ zR<$ZPq2BH#-KyTEo)l8GkQNDP@e%cudWU+akd_E(sgRa=;<{KGdz@7t`u{>7Rv%Fx z71DAc)e32a=f^IVkii$!Xa2v?XVvG_i$Zb>NeF3W_`vUI{xgS=9&HG@Q35XuDdWsE zKTBzw^w-pP-RZ~FTh%wzH`SBs+v+>b8dnQxjgS@#X|0gf9aG=4sOksmhZfbM3TeGN zM@Solw8?YbYPsF@B^}hu>Q~;Xs+u>y$a~j%Z`30T5z@w%()wEcrlpFC4y#{Np6ll~ zwY)HYX-IxmNJ783_>f8C{Vuoc@j3OXJFST>?zXMVcTN4voqk;XMg3L%P3fopuKuC^ zDWq*e+99MmA;pdn(rzJngtYIthGx;)Xl*qgP10l`9T3tXA>Ayb6GA#Eq&tOl+RJUe zwP_t%zQDHseZSH=dlssep&k8N96R0A<-gj$?(27~zM9t3w#!YfU*EbN4R+5+&0=)! z*3!Jk+uXmdNr|b8e#hvfb#H0j>!vR5?FpT(u91Ps=Oc>{)c1CBKZ z4A({o>9~+?@hmqiVFBZ`iT?_iq)ismtwOrZ^O9}}44AIX@&?TC5^irM%+@l6bV^8f zv=H*O!hdNf(u#$2myquEoHZL~UjgOX0wLWaq!pkZmA3XIKD?oQCU@=ISUrFa<;c{9&C-=RX6d&!K&tIaXj4WIMAF1 zN@I)T(N@Rdx``jg)vfrzPt|U2aX!}SJmJadYKaT4uA1lUGq-izlP!VgTLbTGrX2g! 
zud8;t#d)FCdB*#EFt9nnG6O6|UC3vWs{N-W?5WnUhrJBX?cFS$0-GNVwR74t?(|!< z$F#?_^V$>I1?@@gDeY;`)&NUj=jXJGMAJoknh5RXGiL%UlPta7cjq=~PYdZKuhJI8 z$J$q3#3$OP+GpD5+85d-?Mv;lCpO3u9P*9!t@fSvJrUYfcedM4Nbd>heIb1+x-EZ60aT+X(4{|CYHfxzi5ovffTe9}4N?8zOGf zJL;X?sfRljb}SNY9usXIKisiI?;_fq7t%-JgO-IfpJlDlimv}7x?M}S>3KBR(p3-C zd$`kY(S!70y}KTwhw5Q^xE|s8HrUcFDpHTqd+NRP-nvb<>(N5`T1eM~ED2c_vLWO^ zAx8;$$eGyg7TKbv>Vw?r$MwE?KRr!P*Zb=O^nso+A(nu4Ly6#7eZ(1Oh^4Ki&t!Mn zp&Rp?>P~CD`06wCTrXj!K1-jiXX;scww~i@2(<*Yo1+)%MSAg>bD@?Hi)#TLEEDvF zSram*Bc4K1%Os}i!M`4rVF^{Uzbz1d_GRUOu=DgTGApnBJ}o_#={322 z{(oO;nyl^fqLT)GI z_CoF;n_B;=cf+*!!JLhf>cXnl*mRo_OmzC+(hw7$#pVYo$Yr+FX0W!-Z<-13g= zklrL@>!B8HNA#mve=BPibwt0J@@W$?GK#92S5ZHt-_jEAy^f#LZ@aO)`XT+)-wyqd zewWv{krcXqkAA;9{fK_AexH6u$fl6H3c1@6{cJlTi57Bz=U@*@e#|-jf;;`F{+Rx_ zeqMh<$U#C57IJqXhaAEB%W8 zwf>EedkVRikb4W+CS<#iqlMh(xc(i{`Vab5qKVdj7II9pu2>=WZGOGsC*(mnqlXRm zIHD{UJLomuH)nCFv7SLaEvCQ4XzNaGGTI0^uF3Eba=ce_fXirababaT86AY2&}4KH za^kFcWmQEL#rboJ{?=@CG5ozjenL)aGOR*Qo}N)&UGdKd-7viohLBU5jIKgXoq6LF zm~;bklM!r$v}ylc-^`=md$#aaNzfVk~Sjc1kd!iWg+-XOQ zQo~_5g*;Zs6NEhZM)#YHa--rO2VtAX4;z(6m5|2?dAz5M%@P@~)L8znfLddPkS7Xx zl4q#R5*o0|So^Plb;f!jPZ4s4$7QobPTOMaai`y6Y&EtS+l?K@PNUA)W$YI6G$BtH z@(dx*6!I(~&lYl~kh5+f+Sp43k;Va|-e@oyo0lTn?dQ%G@=77^67p^#H+X)tS&sXk zFmCgvz^y{gX);aSf+JcEaZhkULfS9p7t@8v=rlOckU76 z8{=E!J0VvHxk|{@FVn&J!MJJ|vqu*el~v6vo>x>M#b(g7{8qua9ET5BKHLtuZGa=g^($Z8pC*N6Q51Ch171BFop7&MF z4NvcoIo__h3(70!RrQ%yUiNRtf4jDDZbZ*1ca~T5@_Zj_iR;kao!exF2w8Y+ahA}O z2(!04{bsX=8EHnDJ<|rX=`EQ+?W8G< zA#WA(jvEPglR3%E_}>CEr<&7*yiLg4J!|7Fp|06x_P+vh%v>Sw6mnhW(DJg%a%a)t zilY4RK^24k=NhD&g=W$JoJrh%57oD>oMi*(2ort>P-pMXrAhzSvwM zL<*VLOvkm`dq&*qlXP2R#Z@3Qc_gW zBE78T2KTDD)?7yibG^BNLUWV3IoHqh+?-$;9n$E{*%LxO_PV*%+-7b!chJGyY1UaL zn42vV&D|c~M9W0iK_MR#@-1#&GyX0*bFaD2TXaMU`DP&>YAHE)cFWX1HxIgVkC=zd zCiAe6n}mE=$VYC}jd7#YT8i+0tIE8s*?aUKUyq)56D=J^{iDGCM=XVw3EqF^Zu4F@ z-uc`jcwj@i>w&y7Keu(sZ zm}F7go;RQHTuZVRBz;^-oR)X&$@LdYN%fZ(LzAM3Z4fw7H z--F=$Ao#uxzMp{aZ_p(gx(tLaxzJ@9blC-6Zi6l-q03q5@&I%>4_z)mm#3i1GtlKZ 
z=<*u4y1W5h-i9vkL6;A~uN(Lc1;1SI+W>xhz;7@3odCbv!0&eOy95000>9JXcQ5#z z0l%~0_YC+w2Y%0k-;3b)GWfj)es6-`JK*;|_g zx(s>;&^v+N9rT`{_Xd|919}|j37`)GeF*3yK_3nJSkNbdJ_YotpcjB%4f-O`U7)WA zeK+X)K|c=q3D8f0ekbU6gMJ3|v!Fi!`gzbVg8nY(pMd@q=s)7Z=m>^C7|~!PgOLhG z8W{b-m;lC1FbcpZ17iUg6<}0>u?UPO!FUmj_rUl7jL*Ti1jc1Bu7GhBjGw@`2F9;o zTEJ`%W;B>dU`}>{IUCF@Fmu7o2eS;!1z^^K>GcaRSAn?(%ynRH0CNwRd%@ffW<8jV zU>*YVFqlt)`6+bm1YP5x>tyJ<2)eqUtAMV1pzB`fx*xh)>OD36EZtqFq3gZS^&)hA z9lE{=U4MYC*P-hl(5)+U3xjSE&}}Sqn+n~gL$^xkwj8>xfNuMs+hOQ-6uNy3-M)fu zUqe722uOi|z7Vhk0t5uCf`CQ{I1T|Pz!h*30!~4|83;HF0k1&7YY^}T1bheqpFzMk z5aI1PzCvTnKVPP#FX*grG_YS_DB$AZRlL?S>!^1l2=OBLp3SpxYtn4hXsj zg6@N$`yuEd2zms9&Oy)%5cDwweFZ_kK+tax^cMtMAh<0Aw|7BsM+m+Nf;9*ZhTsqg z4u{|#5S$FbsSw-`g3}>*00i%V;QbJM9D+|k@F@ts6N2xC;4=_>7J?su;Kw2OSqT0N zf`5SEA0hZB2>uzmw}tK!ba(kdcPn&Pp}P*tG73V*LdbXs$%l|q2ysHlLI|mZkZK572_dV&6|w7a-(S2zebs-h`0%AmjrG`3OQTL#Pa)T_DsS zLRARWAv6j?dqJoTLZcxx20|A@s2f7JK z5H)o(kdpAUqwy2SE5B2pz#KqY!ZnBF;g?GZ66{L|lZ3w;|$PhYKA<6?$`yi?bqAozx^AL3zqON*7{RC0JK-6#0vmNy806lf+X+qC#&@&Kv z21Cyf=otn*W1wdo^h|)BNzgL|diI5$Y0xtddM<~aN1*4^(DPg9)dhN)(5oBt3V>b_ z&?^#p-wwSy+yTAsg5IY+ocg@Y{;Y`wr{2U`r-;=z^#wp6gCfo%ZT z27_%F*hYeF4A{nlZ4%frz&0Ihv%r=Gwp_3kfUO8@bHU~STN&6Ef~^W{i@{}E2DVzT zxxuyyY-_=`0c@MWwhe4M!L}Q0d%<=9Y>i-R0^3or9S7U3V7nb`cY^IS*zN<{S+G3_ zwnxDB7}%Zw+f!hB7HrRh?Ip0i3br?JVS5{F?}6<@uzdoy&%yR3*sg%>Td@5Awx7WE z3)rrM?N6{_fpm0_>x~J`U^?!9E4-)4)Cx?3rNC0ee2!3&CCj_ENCV2m1oBSAu;J z*q4HRIoQ4Ym0({3_Vr-j1oo|9-vRbrUcU$I`@!A-_CsJl0`_BIKLPfWV7~+GcZ2<2 zu-^~%2f+R?*q;RZ(_sG!>_3A2XNdNNXal0VLUaN|r$h7rh%SWa`4C+W(OV#T4@B>U z=zG8w{SZVy0@3e4^rsO0IrQ;`J_hvZ3Vr%PpP|raIP{qVeVovz4EpSYK8K;tQRwqL z^mzmNyah3V5Yq!IY? 
zdm#QG#5Y0wLlA!f;-7-}?;!qHh`$aA;gDd5gg%fk1`;wLVHzYyVrZ$%7$zC?sb=auFn#K=O7--UrDCAo(;TKM2VWL-K2o{5~Xq2+6-e zN?S;gASD_XDanwM3MrEzWj3T_K}s#8tc8^Ika7}I?uC>ykn$#^d;}?q3;vW_j&01BJ}+l`u+@ke}R6zpkF-nON4&2q2CX3f z3Sq!}7*GxaHo|~97_b`#+yMjbhXMbA0UyDD%P`;y4D1X8bzBTIVc;MbI2s0yg@I)- za4`&A3Ipq5;LR}bI1GFW2EGIXUx9(w!26>e2Q z0}Q(k!(4yB@cuA-1PmVq!`Hy@tuTB$41XSmzX8MFf)N2QA_7K4!iXFgQ3508!H6ap zaVv~C2_rs(5#PXw?_gvsjO+^|(_myZj9dXD-7xY3jC=t`z67JJ;2ISGqk>@6Oc<39 zqvpVY2 z04C+aq`5Gu6egX8Nsqy#^DtS0$-Xez4<={8R><%`#y-gS2r@21#ub>_AEu6g zsiR=38>Vi6sheQxD=_t4nEF0U>ju-p!8NT1Oe=+Hl`yRurab}Eo`-2K!t`jEo($7d zVfqf3z8|L7!}Jee`X!it8D=EFjC7bW0A>i7u@Po$h8Zuwj5lG%+c2{$%nXB>5ioNO z%yh!cGMITQE@qyFnfJoXpJ3*nFbkNK4zq^CtdTHlEzH^mvv$C&S76q=FzbDo-4kZV z!R!Q>?Sk3sVfIFt{VdFW6=uH`a^J~cb0WyDt%%36iI%NIt)FL0J1)UtV@vf6=Z!4S>Hj{HOTr6vi^W< zz?Iz|vZEn81+p_BI}5UNAUhwj3n6jims$curzc*skFyduaGkhc}`wnJVWglD z3Hj|HKOFMyklzRL;~+l~@`pqID99fJuKe+kKMC>=LjEm~e=p>pf&7Od|53<)4Dz3V z{O2J51;~FH@?V4e-=F{}2!w(jP!I(Ly`dl)3i?4oIuwkCg0WCA9ttKx!DJ}NfP!gI zPyhu*xG0zl1*K5ngn}|CSO5h}p`Zo|wnIT36zqY5eNa#j1qY$vFcjPj1-C%KZBTFu z3eG^mStxi33f_i-ccI{CDEJHJSYS>N%!!0KJz>s7m@^aRcn^*_i(!r%=B$J{4KU{z z%((^Td=7KIg*o3tVLTM3L1BL=Tn>e6pl}@&9)`l(pzwAmJOzahK;c7B_%IYc0)@{( z;qy@V78Je5255Tlw5_9 zpP=LyD7g-E+r!+BFxP;&-C%AI%ngCL;V?H6=JtZQ$uKt+=BB~i{xEkS%pC%Ahr!%J znCphQ$6@Y8n0poGdEffa3x#>%FfRh;MZ>%pD7^B~_18kD{XrSCxL`%wB3lzs}OUqI<)DE%5rzk|}NQ2H~J{tBhPL+M}OaJ2!4 z1djIL=mZX5aQK5m1&0BSZr}(4M+i8=!4V0LUf{5UBL*Dt;79^TDmc=>F#sHc!7&UR zBf&8S9OJ<;2^<;Vm=2Cv;K%|;F1Q>8;3xveTyQwRQ3j5M;HUz}VsI=2M=dzq;8+EY zwcywQj?LiM29BNJ*bR=o;5Yz|MsPHN<0v?egX30k+zyUA!EqWK_krUqI35It_jRD- zF>pKqj;Fx!EI6JA$4lUN6&!DX<85%f2aXTH@d-FS2gjG-xB`xE!SMq)egelY;J6Ns zKf!4Mrw=&WfwLnxJA=~?oC-K~aCQY}AUM0bz!?V49^mW=P8&G;fHMx9iQr5DXFqWE z2j?Jg4h823aE=D&IB-q`=M->G1LsU|W`Z*ZocZ7^1ZN32OTjrGoD0BN3C=~}Tnf(R z;B35K55V~`I6njDC2)QP&Tqi^Jve^^ z=QWrQ%x?qpBVc|s%#VTjBVqm|m_G&PJ7In`%wG)i8({u1n12h*zX0=JfcY=M{I6mD z&oKWNC{v*<2+F$SqHH9TO@gv1P*w?L%b{!ql+{Do%}{n6%3gx9x1sD^DDMvCJ)yie 
zluw89Tqw_n@;y*~5XzgN{5+Ijgz^`l{2D9(7PNr{k+2{J7R14VJXkQ#1q&RoU?(g% z01Fym!5LWaC@gpk7JLK?F2jN=u&@&>RAHeG3lm{se^@vW7M8-oN?2G83-`go!?5ru zEPN6cz6c9nhK1Lm!Urm3s0fD&J5=<63fEYumJ3$Ps2T%R8BjG1sun?&3#tTE9e}E%P<0Hdo`!A8RsD1>h&q4JkxTwAY)!)D(KUmZi76rhf zL|D`x77c_&b79d!SX2p%Ho>A@uxJk~x*ZnX2aE29MK8mmcVN+buvmh{zOdL27Wab1 z@vt}%7Ego4Ij}en7O#fITVU}v7c9OL7N3R155VHrVDbB~_(NFyJ1mi4Njq3#gC&Wu zBpH@WfhC!+Bpa5jfF!II0cv?DB4V5tU6hr-fvuyg_} zt$?Mj8dzEjOB-P6F<5#FEPVo&J`YP@gk=_3))AK71k2)KSsE6qX%? zWw*exTVdHnSoS(BdlPCbP}31=Zi1R*s2K<~gP|r1YKowy1Y9*+p~eF>`=I7AsCgD@ zE<(*Ou-pR6+rsj2SZ;^qePH=aSe_5d=fLt!uzVLR-vi4ZfaT|5`2|>h1(yE=%dbIg zSEvnx+6btf1GP@5ErZ(Iq4qvp)ZP!ZA4Ba|Q2RBk=mINDSkVnuq{E8guwo>vD1a3X zSTP?~tb-NXVZ}~ZaTZoQ1}o0PiqBxhH?ZP6aGBr=1()|JcLca5f@`ujjiMV39h%n9SrU$aQ6at6}W4`?E?3s;C>q1&w?;Ogn$SGQ2@dLVm^om zK%5700ajXJWdN)Uf|U+fSp_Q>!OC;6@)=m^dJa~#fmNMgRcBZ=4pvQrRWo4KK3H`a zRvm>^AHb?hu(gNU23Wrn*6)J#Z^QaeVEt#f*kFSV ziLfCVHmrsXTVTUB*zhcDcojCh4ja3`MiVx6gN@^1<8;_K6E@bt#(LP;2pd0wjhA8L z71$I3o1$S;3~Z`^O*OEo7B)Qun=Zhnr(kn?*zE6u%?fND4V$OH=Bco`0X83l&9}hj z?_l$_~^6&R(!H9(E?e&W*6M4tDN_o$tWTPr^=s&Z-G4$?D2&?ez0dAF7{Nwo+{XL2KGD(dmaN%C-A7? 
z(ZMqdJO$t>1kZ8s+zFn$!Sg+Megn_%us03%4uicTVDC!Uy9xGgfxQ=D@9VJlP1vWx zz987w9roqGz7p8y{r=x~8|=FW_T2~jf&Cp|e<#>K1NP^^{sP#41ooeV{ik66W!Qfe z_WuM2bT|+W2YSGPNH`D$2NK~xG8`BT2Zq6ck#JxP92gG=Cc%MBIFJJe^5KB15Dt{U zfl@fI9}YYL2Y!V5aH!9K`f{jW4)rUbUO@e7s9y*58=<}t>W@SH38+5>^>;!2Jy3rR z>K}*tr=b2>sDB>nUxE79q5dtX{{rfNgZk^>YVd&ue`rvk!GwkYXb6UeaA=5xhF;JR z4-FHbVFonJf`%+;$c2Ul&`=2t)zGj68fu_nH8iY+hNq$7WoY;S8a{%COVIEYG<*XM z-$TPM&~P0W4SzzT1sdB!V;5*lgvLqGI2#)0Kw}X!&V@z?G%kV0Wze_|8aG1Y7HHfK zjdjqt2O9T5<1uJF0gWf2@f0-P35|C{<2}&$HZ=YW2ZP|?P&hcp1qau|!T)RFzN54t zk8pwi#%Q8g5~ERR3(K<0R$vPniM?xxV%KQY7&RJ=F)?av`7F^GyP}4u(R|<1dsm7e zh#=Ttm)<*UW7%D{@9I6=d*`1yXU_XPXU>^F=6&8d@4Uf>!$_V&vOCE%lIbM-lI%w^ zmt+CSfh3Dbo=Wm`l4p=So8);UFC=*h$zPJZoaB`xuOWFo$zPNF4awU`{*L5bB<~@a zxR2zYNdAT7LnQx3@^O-XC;2qVe~|nq$rnk!Lh^NzhZ22<ODUu^ejv+ao zM&AAkUEmo(WH(ebv&sPNu5lp8>yb8dXvf^l|?E?LMors08&My z29f$Xsb7#fi`2QKE+BO=smnTXi^lDePNpGp0d z)FY%GBlRSyr${|Z>UmQCN|1V))N7>PB=t6_;iTRp^#Q3QsS%_`lNv{ABB?2)rjwdQ zYBs5Pq!y4`MCx-=Uy%Be)HkG7l3GP-EvXHpHj~;$YA2~Zq!JaRs!7$7swdS*s)bZL zsqZKaN)M;>N0fG<^jJ!NLg@*VoT6>X&$BhDJ`V5gwoR}9Zcz& zl%7NB`IH{I6_s8}=@3es;z~yRnh`fM;%|(2ni0=3Vmu>eNEk7T5o;N-jS)K-iIGP! 
z@@Pg5V&qwjJcp6bGV*0czRJisjQos|pEI(JQ9oeR5sb=aR3W2^8Fdq*?qJkijCzq# z!x;58qZTvjYes#`sHOy?zGrkWx;vx$GCGUV7c%;CMh|85pBVi&MnA^r;fzi)x|Gr1 zFnTSc*E714F+XI?k&MY>ObKI7Wz4ONxtlS6V9Y-m^EzYRWXxp7e9V}{9L8*9%r3_4 zVN54uf5_M)8QYh!1&kfQ*y|a48)I*0?2C*Y#@M$RJC(6>7(0)#I~iNe*!_$ z+=+}UV%#qncP8U*V%!~!yNhwpkr?+X<6dXnB*uNjxY>-`z_^`^+s(M|8Gj_>yD+|p z@xNgFnT)@c@pm)+4~&12@xvJZHscpC{tL!0XZ$wCS1`Vc3Eh~G!GugET+D=_Ot?zI zgugQ3NhTziFq{cVCX_PaD<-UF!dfQOGNFYDZA?6#iQSmkgNdgy@oXlZ%fx$__!lPr zm5Hw~@f{`(XX0!oE@I+hChlWm9TOXvbRv^_CYaQVNvAXEJSJVhq}!Nu50ma?(rZi_ z&ZKvlG>b_Kn6!{dJD60(q#7pwjLB(C?#<-$nLLEamoxc(CO^XDN0~gD$y1m-jmc}6 zyp_q@nR00UH>C?xj$uj;Q;L{U!jwCiaz9fZV9Gm8`H(3|rj#*dBU3gr^#@EnhN;If zwUDWUnR*6O?_}!zOnrc@jqsaWyW}BEMdmC%vi~cDrOvHMiVo8 zGBb;r+049@nO8CM8fHGh%;%W-PiBr|=5%JxWac_%ZfE9BW*yF~W102i1ha~nbq2G} zV%9UvdWl)DFl!yNwlixdANA#<0zMkRN00N-Kltc*K3c*@-}2E)KJLZG*?gSK#}D&y zf{&l(<8S$R9UpICb|$m?GkYMj?_&0$UjeiK%C9&`T7oZZaX&zu9y?a$oPn0q>NA7$<{%>4&*7czG_bH8F<7v`P7yq_`edgk56 zyxU34dzX2o%p1wP&CJ`wynTF<&L_EilFuji@ySDc@(7OU*KV*Il^P8C8!h*9{a4`!mWx;DK7%pMKyDZqpf;tv7@aX_P{W+fw z=FZ)Pk58Lfh=qr-@In?|&cdNAe1U~;uy7a)*RXIa3%9czyR!JV zEdB$F?_=={7JtIx1uSl4@ppWV&#&b3U-S9Rd|t}u6Zm`*pSQ3iSd#bwONOxIT9#bT zlJ{6Lf+eF^(#VqUSc;|Rvh*^R4q@qQEFI3$cUfA=(gv0`^2M2aaS>l!!WXab#XEd4 zoGBW7SuD?H`J*gr!vwXXVZ@=W*uUYY9RveoDtmwvyJ6Q2YR{V(-b6K&N z6-!w8BUb)|l|NnT4-c_-!HvuZG_E@suGtQx|q%UN|jt8Qe~hpd{ws!6Px%BtzC zn#Zd7tUjLAX{_$e>I_zAvO3Y9)dN|5H>)3F^&_l)jMY!D`Z-qrlhq4Z{SB*Eu)3Vp zt69B;)!SIpgEcv<$zx4_)(m9L>8$w$YaV3HQ>=N0HP5l;pR9SEHE*(JIcwIEShJor zn^?1jHG5f8!P)}W4rc8ctUa5x=d$)P)(&Cq^Q;}l+P7IdoVD+=wv@FaS-X+7`&e7a z+8Wl@vhFn2UBJ4FSa&Jwe#yFFtotA9QWDmUWZh`i9Xi~>y6;#Ytp5S)k750BtiOu& zx3d1XtiOZxcd>pF>*uom6V`vq`bBK$!iJOC(3K56*pSACer(8L!~JY{oDEO1;VCve z!-kgIEsy9*tnmK?QHC16E+>jrlZ+(ESoN1)0J#WT+OEI*mMJ%e#@rcv1v4$X0T}% zn`X0VE}Is!X$hO!*?c6MyRi9KHvgE-r?9yjn{Q?FeQds;&3|U|gKU0+&3|X}Og1lK z^I|qHW%DvNmyy`KiY-0alFOESwhUlPAzKEs!z}9-U<+ANGww=zlGuU<(+b(9?rEE*E?Pa#T%C36Fc93m{^xyWOZ2t}0Z)N-KY`=r;e`Ncg*xtd8qu6mYJC0+=PuS6w 
z9o^Y+H9Kx&$L;L6lO4Zj#{=y63p?hpV<|h9vEwUte8Y~l>{!pv6WH0CoqgDu$Yf_f zb`E4`5j&q^=PT@djh%0@^DTD%mz^K7vyPqLu?xF?z^)_MbsW2X!mgq0`VG5oW!LTO zx`SPRWY?eAHIiM^*foP)AF*pTyB4u)F^SzLvOAsK8SKttcQ(5V*|M&<#4Ynir-Oj z7ZvwVaUT^AQIU9tidU$3mx}kP7)iw#D#lYWiHfOI%%S2FDn6y+Gb)x)@hugbsMteA z0~L)_d`D$ac{r6nqOuE>Cs273m0hXqL1iy0v#Bhi@@x{7=TdnYmA|5LD3w=Jc^#Fv zQh7U-cT#ybmG@HlJe3ovoJ-|SDyyk%q_UH$E>s;u)z7Fpg{tmUrBRhmRW4NpR1Ku6 zn5t8$I-9CXBvf5X)%8@}M%C}Ax{IoNsJf4;hp74+RgY8kcdDMI>P4#FqUyg?jiYJ; zRWqsjn5wx{&8KQ1Rm-XRnyM94l~c8bs;yM*qpFUo@2Hlb`Y5W8q58*E|CH*VQJqG0 zI@Nut?niYl)x}hwL-l1;Ur+UoRNqeZomAgV^}STzPxT{IKSuSFR6j-avsAxK_1jc` zNc9A&CsF+o)ra1Hsz0InQ>s6s`YWoxrMisj)l{#edOOvXR5w%IO3h)^96`;I)ErIC zanzhlO*d+KQq!B73~KVJ8AQ!_)LcN#uc#SH&DGRgN6n2SYHp|IPHOI^=3Z*Vk79oz`v|ezG73^=JHmL1F?a9=3rM3^Xnbc-e zn@4SbYEPqfFtuk=dk(edQ#*v(YpK1R+B>NIBef4u`yjOsQ~PLw+GnVJj@lQfeTmvv zsU1u03Tn4-;D;ReF$a2ZAd3UV92mrbvp8@r2QJ{i#T>Yd16On4Iu6{(ftxvSD+eCq zK#~KKIk1)k+c;3ofhOt>>A$)os5_3jMo=1&xe52 zjizo2b#tg&Ox;TA%BkB#-B#*$P`8`9ebm)acaXYf>e{I5r2dB_>QA6Pjru(53#dPp z`qQaDgZi_nKacufQhz!1S5kis_19B>8};{4{}<{L)IUxAi`2hD{p-{Zqy8Q0KcqfI z{YdJ^P(Pmf8PqSN{!0n<>!{yA{Z8ulP+viPHTAXBw@}|s{dY734TsZkEDa~o(2ItA z8v4_48V!SKIFp8RXgHsSAv9b;!&Nj~OT!H`{Fa74(C{D)Ptov9f`*r9c$J1XXn2c; zf79?k8cJyxMZ;JcCeSdGhWRvnLBnbq*3z(zhMhF*p`n6?Y8o18XrZBA5#GdXw;2hZo=5Ds3!!K*lUEeCJl;Da1|mxE(CxPpW0Ik=aD z^&I@3#-OnajmOgX6BfqUkJ}&ZX%B znl7g4GMcWY=~kNVmeBMNO^?v@6iv_4^gK=fqUmLt-ll0dP4Ch40ZmDo#?dsLrg=0i zqv=bUR?)PUrVTW0rfC~Z6*N`TR7+DmO^q~tM{^gNPoTLs&3zIy=h57s=0ciFXg-bR zb7(%F=8I^)l;$BcUrX~XG~Y|}A8CG==0|CMg62a!&(Qo5&9Bn@2F-8L{BN57NAp;k zr_sEC=7lsbr}^tcxD_;))4Yb}tu*hTc{k1bXs)99AT4M)ik6dTIfa&VTKdw`kCt3o z3TQc%meXlDgO;;tIggfK(sBbWzoX>=TK+=IuIzOruBRYtryXHDXl|jy@J*oXuX5h`)Pfg)+cFwp4NZS`ZBGr(fTH>@6q}J ztw~x(&^nsdDYVX|buq0gX)UL96Rlfm-9hVaTKCafN9#da57n);jn+=uen{I-XzNB> zKiYC=E23=>Z9k{&7qp#4+r_k9M%%Aw8%o>NwB1bGJ+%Fmwx?-(mbRB^dyTd?X?vTt z;j|@b8$sJ>+Q!kAm`K|!+7{6E1#PQoTT9zE+IG^mhqemZs%dMat%bIB+P$DG}{TUAMI7N@29<)_BPr(=|IO1 z=s1RsZgljM(2+w&5gmi*_&FWFpyMn$E~eu$I(|jRP&%%r<7PVUqT>NNo}?o|#|w14 
zM8~Uiyg|oXbo`f&|ItxO$0$0+(lL#W`E)F!V+|ea=-7UUU>6;G>8PZmhK?pWTIuMZ z<9j*}qw{DwPoy)Q&I~&9=^Q|35uJnR{5hTH(s==$7t?teoxh^Fif&^>w{fD|bkS|5=+-Q{A1=E8P;@^^bU#{jKT&i) zNpv3~y5A_e-z2)NzFKr|6+M0?dR!oS+$DNEBYJ!$dTbUw_J|()M2{-bW54Lp zE_!r|p2vxv$BUjPik>Hnp4~*xo}y=O(KAEz%o06M6FpB)h@QU?JyG5kkCerQ{X?KgX#JwWz0g?8gNPGBDK%~7a(q0v5!$jIUBJDpS z?E{gP6lo(w+C-5yN2JXYXqXiok+xN&?GS0ZMcSc+TSZ!jNc&#& zI!yFBLi9RPBzhe!dL1Wv^%uRa6us^gy`B@jMu=X^M6YVmyNl?3vgqAa^v)K&^F;6d zqIaR_T_Sp)CVCGRz0VZAhlt*ni{4j?-q(oU*NNUYh~77e-nWR}{}8=LiQcnC@68g? zyG^90iS!Fa`sE^hs7Sw3r2kr^-z?I9BhvpM((e=LkBjubi}a^O`aeYaKSlbBBK;MS z{<=v2P^2eC`UsIeTBMH?=@Uiz6p=n%q^}q0ouW@4(dPou=l+D~^Q!1GQ}p>n^jRYM zl#4!VM4zpqPo?NnEBdsFJ{=z1xuWl4(RZcjyH51oBKqzX zeJe%ZD$(~4Uz14mZ5EkC<_|>X;Ue=`k@*vmdA!I>6Pdk5W4g2=0K5on#de1 zGS3j1XN$~Vi_AZZ%r`{l2O{%-B6FvbEe3gFESU3%*7&esmNR|5t*Ar z*5M-S1d)|1vQ86Or;DsJMAq3N>pYQlp~$*kWZfaM?h;wQ7g=|Utc1vVS!BH`vfdC` zZ;7mbi>!A=)@YG6R%DG6S>r|4$0BQv$XX_{z7kpAimb9zMAmANwN7Mh6j@tDR*lFy z@b3k0#L0=nM8D%jzf(oOOGLk0MZdp`eksv!rRY~F`ZbGwt)gFt==Z(IK1^gEA+k>v z*?mQJmdMT#+4&-SfXFTq*=LCCOGWlCMfT+)`%000jmW-UWZxpP?-kj96p8EyMD~Ls z`(cs&sK|asWIrpiUl-YLitM*V_HdE?p2+?{WRDlw6GiqEkv&~x&l1_QMfMVr{e{S0 zDY93I?6o3$gUH@2viFGW3XxqcvTH?ly~u7CIY&rD&QC?oi6W<~$mt<+dWoFABB!6o z$rU+;BIhiTbB@S4U*udQaxN7)LqyJvBIhQNbF;|#jmY_f$hlACJScJ=7CDcKoF_!i zvm)ntk@GK+^Rmd9CUQ24TnUkTtjPVb$o;9v{h7!;MdWrDx!EE&SLEi2+qXwLMc!{j-fbf9cOvgDk#~>CyH6zY{v`6A5_!*x zyyr#U3nK4Dk@t$odtKxW6M655ys;v0k;q#i@-~XRI+1^($j=t}#UlS4k$;}Zzfj~~ zBJzJJ@-G+p4~zU~MgDUl{{@l%lE{BmE-;s#?QjtGWm{Q7}am%o7DGL_xLa-$nF4N%Sug{Vx{% ze=YjoBl zi-CU-1OF%nJ}d@4BL==C3V$dH2Z+L-i^9R8@Jvy7jwrld6y6~U?-GT77KMKmg^!5B z$3)?iqVOqEcnJTBD12QM4iklMi^AceaGWTdAqwY-!le>XxJ?w+h{9S?ST712MPZ95 zY!gK%i=w`wC`%OOh@yN^G(Z#;ilPfe(N&`88c}q;DEhT1`i&^MO%&ZDitZIf4~U`% zMbX2e=vh(poG5xx6ulyfUKd4gCq&V3QS_cD8Y_yXiJ}FfXrU-tB8rxYqOU~JN>Q{* z6s;9SJ4I2oC~6f&?V_kd6m^Q?E~5AtQT$_3{8LfGqGXjQ zSu0AmiISb7WREC0AWAw#$@gN=VPen`Vo;hG)Z6c4P#-ZUQw+)$g9^o 1000) done = 1; } + gravity.x = avg_adc_channels[ACCEL_X]; + gravity.y = avg_adc_channels[ACCEL_Y]; + 
gravity.z = avg_adc_channels[ACCEL_Z]; + + std::cout << "Gravity: " << gravity.x << "," << gravity.y << "," << gravity.z << "\n"; printf( "Done.\n" ); } @@ -347,27 +369,13 @@ void reset_sensors() void update_pos(float frametime) // Using serial data, update avatar/render position and angles { - float measured_pitch_rate = adc_channels[0] - avg_adc_channels[0]; - float measured_yaw_rate = adc_channels[1] - avg_adc_channels[1]; - float measured_lateral_accel = adc_channels[3] - avg_adc_channels[3]; - float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; + float measured_pitch_rate = adc_channels[PITCH_RATE] - avg_adc_channels[PITCH_RATE]; + float measured_yaw_rate = adc_channels[YAW_RATE] - avg_adc_channels[YAW_RATE]; + float measured_lateral_accel = adc_channels[ACCEL_X] - avg_adc_channels[ACCEL_X]; + float measured_fwd_accel = avg_adc_channels[ACCEL_Z] - adc_channels[ACCEL_Z]; + + myHead.UpdatePos(frametime, &adc_channels[0], &avg_adc_channels[0], head_mirror, &gravity); - // Update avatar head position based on measured gyro rates - const float HEAD_ROTATION_SCALE = 0.20; - const float HEAD_LEAN_SCALE = 0.02; - if (head_mirror) { - myHead.addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); - myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); - } else { - myHead.addYaw(measured_yaw_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); - myHead.addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); - } - // Decay avatar head back toward zero - //pitch *= (1.f - 5.0*frametime); - //yaw *= (1.f - 7.0*frametime); - // Update head_mouse model const float MIN_MOUSE_RATE = 30.0; const float MOUSE_SENSITIVITY = 0.1; @@ -510,7 +518,7 @@ void display(void) glTranslatef(location[0], location[1], 
location[2]); glPushMatrix(); - glTranslatef(WORLD_SIZE/2, WORLD_SIZE/2, WORLD_SIZE/2); + //glTranslatef(-WORLD_SIZE/2, -WORLD_SIZE/2, -WORLD_SIZE/2); int i = 0; while (i < cube_count) { glPushMatrix(); @@ -592,18 +600,25 @@ void display(void) for(i = 0; i < NUM_CHANNELS; i++) { // Actual value + glLineWidth(2.0); glColor4f(1, 1, 1, 1); glBegin(GL_LINES); glVertex2f(disp_x, HEIGHT*0.95); glVertex2f(disp_x, HEIGHT*(0.25 + 0.75f*adc_channels[i]/4096)); glEnd(); // Trailing Average value - glColor4f(0, 0, 0.8, 1); + glColor4f(1, 1, 0, 1); glBegin(GL_LINES); glVertex2f(disp_x + 2, HEIGHT*0.95); glVertex2f(disp_x + 2, HEIGHT*(0.25 + 0.75f*avg_adc_channels[i]/4096)); glEnd(); + glColor3f(1,0,0); + glBegin(GL_LINES); + glLineWidth(2.0); + glVertex2f(disp_x - 10, HEIGHT*0.5 - (adc_channels[i] - avg_adc_channels[i])); + glVertex2f(disp_x + 10, HEIGHT*0.5 - (adc_channels[i] - avg_adc_channels[i])); + glEnd(); sprintf(val, "%d", adc_channels[i]); drawtext(disp_x-GAP/2, (HEIGHT*0.95)+2, 0.08, 90, 1.0, 0, val, 0, 1, 0); @@ -653,10 +668,13 @@ void key(unsigned char k, int x, int y) } if (k == 'h') display_head = !display_head; - if (k == 'm') display_hand = !display_hand; + if (k == 'b') display_hand = !display_hand; + if (k == 'm') head_mirror = !head_mirror; + if (k == 'f') display_field = !display_field; if (k == 'l') display_levels = !display_levels; + if (k == 'e') location[1] -= WORLD_SIZE/100.0; if (k == 'c') location[1] += WORLD_SIZE/100.0; if (k == 'w') fwd_vel += 0.05; diff --git a/network.cpp b/network.cpp index e386ac913d..7cc72a53a4 100644 --- a/network.cpp +++ b/network.cpp @@ -73,7 +73,6 @@ int network_init() //from.sin_addr.s_addr = htonl(ip_address); from.sin_port = htons( (unsigned short) UDP_PORT ); - return handle; } @@ -94,7 +93,6 @@ int notify_spaceserver(int handle, float x, float y, float z) { int packet_size = strlen(data); int sent_bytes = sendto( handle, (const char*)data, packet_size, 0, (sockaddr*)&spaceserver_address, sizeof(sockaddr_in) ); - if ( 
sent_bytes != packet_size ) { printf( "failed to send to spaceserver: return value = %d\n", sent_bytes ); @@ -103,9 +101,11 @@ int notify_spaceserver(int handle, float x, float y, float z) { return sent_bytes; } + int network_send(int handle, char * packet_data, int packet_size) -{ - int sent_bytes = sendto( handle, (const char*)packet_data, packet_size, +{ + int sent_bytes = 0; + sent_bytes = sendto( handle, (const char*)packet_data, packet_size, 0, (sockaddr*)&dest_address, sizeof(sockaddr_in) ); if ( sent_bytes != packet_size ) diff --git a/network.h b/network.h index e06f416b9e..ea3f98cd72 100644 --- a/network.h +++ b/network.h @@ -25,6 +25,9 @@ const char DESTINATION_IP[] = "127.0.0.1"; const char SPACESERVER_IP[] = "127.0.0.1"; const int SPACESERVER_PORT = 40000; +// Randomly send a ping packet every N packets sent +const int PING_PACKET_COUNT = 20; + int network_init(); int network_send(int handle, char * packet_data, int packet_size); int network_receive(int handle, char * packet_data, int delay /*msecs*/); diff --git a/util.cpp b/util.cpp index 89fdcc8900..90dcf451ee 100644 --- a/util.cpp +++ b/util.cpp @@ -26,21 +26,23 @@ void makeCubes(float location[3], float scale, int * index, //std::cout << "loc: " << location[0] << "," //<< location[1] << "," << location[2] << "\n"; if ((*index >= MAX_CUBES) || (scale < SMALLEST_CUBE)) return; - if (randFloat() < 0.5) { + if (scale < 3 && (randFloat() < .1)) { // Make a cube - for (i = 0; i < 3; i++) cubes_position[*index*3 + i] = location[i]; + for (i = 0; i < 3; i++) cubes_position[*index*3 + i] = location[i]+scale/2.0; + float color = randFloat(); cubes_scale[*index] = scale; - cubes_color[*index*3] = randFloat(); - cubes_color[*index*3 + 1] = randFloat(); - cubes_color[*index*3 + 2] = randFloat(); + cubes_color[*index*3] = color; + cubes_color[*index*3 + 1] = color; + cubes_color[*index*3 + 2] = color; *index += 1; - //std::cout << "Quad made at scale " << scale << "\n"; + //std::cout << "Loc: " << location[0] 
<< "," << location[1] + //<< "," << location[2] << " scale " << scale << "\n"; } else { for (i = 0; i < 8; i++) { spot[0] = location[0] + (i%2)*scale/2.0; spot[1] = location[1] + ((i/2)%2)*scale/2.0; spot[2] = location[2] + ((i/4)%2)*scale/2.0; - //std::cout << spot[0] << "," << spot[1] << "," << spot[2] << "\n"; + //std::cout << "called with " << spot[0] << "," << spot[1] << "," << spot[2] << "\n"; makeCubes(spot, scale/2.0, index, cubes_position, cubes_scale, cubes_color); } } diff --git a/world.h b/world.h index 106f47ff69..b59b19cb88 100644 --- a/world.h +++ b/world.h @@ -14,7 +14,7 @@ const float WORLD_SIZE = 10.0; #define PI 3.14159265 -#define MAX_CUBES 2000 -#define SMALLEST_CUBE 0.01 +#define MAX_CUBES 0 +#define SMALLEST_CUBE 0.1 #endif From b78cc04bf5b4518777e6542d8b5befc4010b23d7 Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Wed, 28 Nov 2012 16:28:14 -0800 Subject: [PATCH 021/136] Go fullscreen on startup --- main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/main.cpp b/main.cpp index 256a550058..4cbd6d7281 100644 --- a/main.cpp +++ b/main.cpp @@ -242,6 +242,7 @@ void initDisplay(void) glEnable(GL_DEPTH_TEST); load_png_as_texture(texture_filename); + glutFullScreen(); } void init(void) From ff0cf7e13c2c2195b9506bfc489689ce30771db9 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Thu, 29 Nov 2012 15:52:47 -0800 Subject: [PATCH 022/136] Updated maple PDE to have Roll gyro and led latency tester --- hardware/head_hand/head_hand.pde | 25 ++++++++++++++---- .../UserInterfaceState.xcuserstate | Bin 104499 -> 104737 bytes main.cpp | 1 + 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/hardware/head_hand/head_hand.pde b/hardware/head_hand/head_hand.pde index 2dbdecdb86..16be76ed04 100644 --- a/hardware/head_hand/head_hand.pde +++ b/hardware/head_hand/head_hand.pde @@ -8,10 +8,15 @@ Read a set of analog input lines and echo their readings over the serial port wi // 17,18,19 = Head Accelerometer -#define NUM_CHANNELS 5 +#define 
NUM_CHANNELS 6 #define MSECS_PER_SAMPLE 10 -int inputPins[NUM_CHANNELS] = {19,20,15,16,17}; +#define LED_PIN 12 + +int inputPins[NUM_CHANNELS] = {19,20,18,15,16,17}; + +int LED_on = 0; +unsigned int total_count = 0; unsigned int time; @@ -29,6 +34,7 @@ void setup() accumulate[i] = measured[i]; } pinMode(BOARD_LED_PIN, OUTPUT); + pinMode(LED_PIN,OUTPUT); time = millis(); } @@ -36,6 +42,13 @@ void loop() { int i; sampleCount++; + total_count++; + if (total_count % 20172 == 0) { + LED_on = !LED_on; + if (LED_on) digitalWrite(LED_PIN, HIGH); + else digitalWrite(LED_PIN, LOW); + } + for (i = 0; i < NUM_CHANNELS; i++) { accumulate[i] += analogRead(inputPins[i]); } @@ -47,10 +60,12 @@ void loop() SerialUSB.print(" "); accumulate[i] = 0; } - //SerialUSB.print("("); - //SerialUSB.print(sampleCount); - //SerialUSB.print(")"); + SerialUSB.print(sampleCount); + SerialUSB.print(" "); + if (LED_on) SerialUSB.print("1"); + else SerialUSB.print("0"); SerialUSB.println(""); + sampleCount = 0; } } diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 375a7f64bb1c5b43110c7bd80872051036926bda..e02456d7171ff4467da391dddf0a8a01ec236aa8 100644 GIT binary patch delta 29999 zcmZtM2Y8g#);|1S)EN&d5FqnBLl4D(CZTtbBG?d-Dk=m*fKU>8FEV+=E@DBARLz9m ztMz#79(%#wdp-8vj(%74_4vN$`_FaFHEZv6@3ro|*IIk_Q}*wPSL};l6&(K08YCJfnk1SgauO{PtrKk$Z4>Ph z?GxP-JrXA*dM0`$PDz}aI4#jXF(ffGF)T4EQIHs&7?YTon3R~Dn36$mFPGL2^Q}I5{y{lAM{Gmt2}mCCieRBri=~mb@mpHknRtNp4T>O72bW zOWvHkBY9WyzLhJh_sMu<<#W~3M_v)dRz$Iz_b;uHdZ+%x6H4xX?AU_EZQFP5*fuY_ zZO0Cs&O3K;+YaqJwB29ls})DqF8-jypruESTHLN{*G_raZM$@Cvwz=@yR(kYS`)=) zMX}4H*z#LSYh?6_#V7XoaNo3H<$LR7)JxSGHhuiqh1p~0WVdb8twY!BL1$!se9q)) zb0*9x9y@NrA=Mo1o~*CSx>u-KcK9=@Im*$3tnagaE9xZl#vwo`6HS3qGe`oy~ z#jcEES4XjxQLJ?oyDo~AN3m6#6A~GT!xD!lj!483@hG+?iro~&Hb$|nQEW%ED7Ghx z?XR+1Xsn_{HRH?bMX{^ORQWD8F;Ttz-^q-&E6;nSR<%UtAKGim_hw}bsh;?wb#0k3 
znO#>7ex+4;&qPMzsCs{NUtebB=P#dDEz$Uo_6_B`6B&d5*8Gpw>M9ex%1;S0>eN3j zkyq9woj5*`8^zW}v32Q0exg+rTOY-4EWa(tIIe!jM3*W+r$px{RuRS0=|tB=w?X(X{@6#F_sE7?c-;}f^VTxe{*2c}|A|*t;?h6VxBnH*D^^C&wOx7V z^M5PMx`1@agYghJpqfb_%{13%H{!*-|vfH=L`acwR|D{N;y!+L5D;vL2 zqkLuEj6|KSe*o_L3$SzLr!O~KIq1Dc<-gR;IJ(ZBKL8K>1vs#B`SX3V61V)J_~&1W zJIZI)%Q&&lKmJfW{Fmb1^2YTu1`dB9@kCjVEr|yc|4clTcsTJ$;?cxoiN~YZqfzX! zDE4?1dm@TG8O5H8Voyi0LtEr0o=Q9|U(LkhnkSx*j>wE+&qT3jqa!lQ4v%7o$}*$a zbGO`EKcibl>g}>I6;-wl{io;>?v!^B64j}!k&e3JMy@mUmmK8n2%#a@hJ zFGaDJqu47^?A0jt+7>yQC%#I2o%km4t>%gE%L=OOzaGWjh+^+W@zcvPqxe~4GJ`&| zXHT9zXY8~&Cyk#xXZoz!1I8|#KDT^v!;A-0hX=8;9u-HO9UXR8MG%i-Z~jl>1l7vA zr-P${qodecQS6;RDIOEl2x^seNgp--s0q#;dFA?O+O3VH{9f)j(jQS9?5_C*x?GKzf_#lDVW-$b!* zH)}3GI7M@h2L0uiW&R$^@5(aEIz_SXqa!lMWHu{bpPdnuugK1Le)90L?l=9pbZ3`! zzv-_zjSR+Bc^nlK1fzp7L18d9D2ie~MzNow*w0bymnin{DE4a<`)#uv`A1C*CI*v& z$-$Jef+~-P-5bcoaXP%ID~&8A16wO)|bNnWI|9z+mpEfkTIkoi=eo-|2Iw z&57cN9Wq5pFh5vu?!cj^O&dR9!O#hF(!oO0|5rOR<52L&@Fh_^_Q!~&L2689t${<& zm^)`|QR##~hgJq;En^ zDF32a#w)1}!Nw?F=cYecZ4S1K`VXg}6XsL~n}e;U4<9&m=!7}HuXwN_*zpJc_y4kB zcUkv}U{4ee{*1@|s*VFuJp3~##{{utQP2-jM#2 z#}ApaaPY)Gjo3>oEIXt{74}S^%9sC;n778=M!=iYrDBe1Xw~69yqj!#?6>9=ub1pgzdr(e{tv-#d|-K`tu|cb_u)v#iM%^ z@AK!dAnY0TDeI9A^Q-*#jN;v^#3~n2yyu^wW5SceQ~$DkS`_d5rz!{sglGJpMGgmr z!^(P8hJ(W)^~0f2{G=#;auh$MQhs<=IO6xEJ2i^;FK=*M#?nRw;h6sfFD#7Wr~PGj zTsXcez|DpR;iPcNe>Ui_B#ICCpR5k2m36NSr-w7bnNj?VC_X5P5B<}?G2xtW{{P)~ z!v*2OC_XTXpIP4O_>7$TOT*~D0OyCxqWIt_KBRp5@fl6(UlLyaUx4M|6;XUx6dzu` z{rHTW;n##~%6e=GSBBSy*M--I<>3wCs&I7_9}&gRj^gJ;@pGg2$S6K4iWfxj(Ocw) z>ok`W-V|1Z>9F$m#TZkTSym9mFNosTNAdC~eq(t~ZpP+XyTUzH5!)Tb3oF9CQG9Iq z>$w^2`W*~!sp`HtiWgOcw?^@C!_F8xdD>r-{(E?LS(nQ2&hQ`MT~T~|6rT{qiz~x> z8r9amEqUnH{rKY zd`=Xf7sclvQZ4*G{2|Oe_q6d7rp=jLJbA*bC_XoepYZ#R82%joQog%YMw5(xmp|Pq zkBa$_eHOm*a&d56~nOVNMbw;Nnl68{F^0!)N)IF`*p3ieDDRmskC+DF0`hj3Gz&NcJr2QIR|$ieFyd zylqC_(S4IARdw`>;#ZW9Y@5++K>y@`|5RA=j3|EP|5R9VP+9kMa&U4;6u&BpulzGr zj!6zrp8bDIBY95p+$er^6u+kYk+vC4>W@hl{TE 
zF1fxc{pCdQ%~3r4NAi~y{4v4zlN-v$R3Qkd7HI6Ue-!Y?EYA`j1Qe!wZMo?oKHD*v_ z7B%KjV?H$&QR6&nlu=_DH7=sYrPNqXjY?|VO^uhS@c}h{rDj`d_NC@HYA&Vb4b=PPf_zFYQ93v*QxmyHQyz(5t*luIgZRL$Xre4S~7Q% zxtq+rWbP;PAepz2c^jE`ka;JWPmuW(nTN=HmdxkLe2L6g$$W#%x20r$PUg?lI-FWD zY8^?fYSgMttp?O;M6GGmnoq6ssdYKE)=_H%wRTeLW@_Cm6!+LaooJ-G|x(sXd6=L#RD0MePD=kD>NvY9FBX&D6e?+P72tK59QeojTOXrcP7p zG^b7uby`!WEp?Vq=ThoiPMs^Lb0u|dpw4RQd`O*dsq;N`ex%ONWF0|PoU9RKrN)z0 zOx7f_rjRw8thr>}O4b8p{gbRiWIapP^JKk9*2`qQO4bKteMHv3$oiD5&&m3dL=zGN zNGv9?jl|0&z9TrCAV%;vf};qklL~4QG$Y6(XhqPTpd&$Nf_?-i6P!+P2EmyG!wAkI zIGdoDU?IUGf@K8D304uTC)iG~lVBgg0fL(e?jX36;4XrP2%eS_yh-o@!RG`&5Y`|} z5Vj@kM%aU}7hxa5A%vp{iwS2D&LNyfxPWjG;iH7l623+F4&ldypAddV_yytjgg+Ag zO!#k-u@uQ;NVX-}mE>@eqe&K$97l2j$yp@lkc>z!BY7doi%DKe@^X?_kX%i2Ey?vH zZz7o{xq;*+l8=%6kh(Rg+mX7%sk?}}%cz@iVfm9?Ga9G1PYYiwv#ED3_2yHrjC$u&ZyohEQEvVc4`a7w=hx+@de+TvNr2Y%ke~J39Q2$-(e?6b(kxUd6Wu8`8;N*Y{EgL`Q3 z2@UJh@H85Zqu~r1&Z6O58qTNTA{s8C;Z-zTMZ+~TyorWs8g8KBUK;MF;jJ{horZs> z;XO3GkA@G>@EIDuPs7h?_zMkxP0{FZ8pUXIB#n-tQ4Jbp(x@(tn$oB_jaty?I2v`M zQ4bpRq)~4gok*kWXtbV2+iA3uM*C=VfJQgd=nfj)Nu#@H^iLW+Nu!Tw^c{_Upi$~a z8vR7$BWYZX##uBDXq=>RJsLNlaU&WZN8?-?=hL_~joZ?=J&niFcm<8`r12+YHz2zU z*(Z^G3fcY19zgb)WDh2L0@>5Zo{kAKiN-{{S4VJlKnE-uaf;1+3%439@(GKo(|nq?rfFN6wx?-tnx070ekq!sOw&_o+MlKaXgZ6gD`^Pd`(yRx~dREqoPbX?7~j`qOLx&4$x#1kGmCYzfVl(rhEmcF}AP z%^s)O^E6AnNVBhL_6yB^rFl!5x21V|n$M>B5}Gfi`HeK+Li24je}Lvs(EKTyKTY%3 zY5pe7-=g{3H2<9DU();+n*YkNIQB4(J%VH799xTH>u_v>)UhGQ*5%j+96Nwx=X2~P zj(v`t7&%SIX+=&Oa@v#Ak(@5%bR%aVIU~q9hn&&m6p~X!&UA8Sk~5E-1>`IuCq+&~ z&N6bYCT9aVJEi2@PR`%Sxrdzl$a#RAN62}MoF~Y6ft;_&`GuTcX^}yT!)cL4i+~nM zTGXRO16q{OVh$}LS}dc*WwcmMixsrEiWcRxSVfC9v{*-rjVW3@LW@JR_>30c)8a>3 z{6dRgX?Zj)tJAU`EgR6X5iPT6*_4*eX_-UI_O$Fs%g(gyO3UuFJb{+IXgQviWwhK( z%g1T?HOJNAxCR{8h~ug?=D3y|cYOK3dSyJCN{}0pTbJDWrP%z^7@e1kGxaJ>rdVp_jmH{BJW=E9wqN_@;)c;2l9R*zc%^x$!|!0XYzZKe&}tT~*3oJct+vqW z5UpOJ)oZlQruA{O&ZYGTS{Kr~h}Pw_zDY{!G_4D?Ht<9 zqwVSxZ7XTJk+u)f_G#KaL)%|y7o**gv};bgeA>09-2mDRr`-tJEudXQyJfUnO}k3k 
zZKT~jw0nehkJ0WM+Wnh$ztO%0?c3149qk9wegy4P=g@v3?a!zE1+?Ez`vbJUnf6c9 z{$<*~O8cMaa5x=ebZAD0JUXF^XC4$ez;k?dUj!jw9(_3-=p&fbjhI0QFJ+mE-mTOmM-n-GJ-CJ zbSa|CQo3ABmrLogjV}9BbU8?u*XZ&-T|T7i(R9tCYe3gdbnQjgK6IT<*9CN4MA!9n z-Avc5bbXkvhv@n&UB9I3Pjvl-ZuxZUM7J(<8%?)~bel}K)pV<*+eW&j9;Mr}bbFp| zKhpg$x*tLJe7bj{dl$MF(Y=K3Q|W#Q-LIzmO1j@p_xtGn0Np>M`}cJJksi(Ikx!4- z^f;3qXVc?cdR#z{E9h}0J#L}L-K2WlOOFre@dZ7;;)En8G~tA1oNy8+4CI7CoNy&4 z+`tK|IpHNvc$*X6rDqL#hV-mU&+hc>N6(Y#If3tKux6*q%y+5J%xAgv=K7HvkfIb6L^jSfla{8>I&jzOWjPrTj}>5{eI)53{L99N&Pu#04H6_N!M`FwVZT_lV0JZ*EqQ`C%5F} z<2kvAlS?>xDkpE`ro`lJ70 z^zTmpe)K>Gg@%Q1)OmOXI#k{&vV8bobeU| zk4rJI9RoWsa0vr1WZ=aNypw?sGVmeJtjn2AIkP!umT=}A&YZ`YcXQ^$ocSn&vKZ8m zL5&$SjzOgin#Q0z8T2579%67c2G?eA7K2AHxRAlAA_nhc@Er`klOZvN)L=*^LxwYC zG(!p*ax+8jV#qxVJ(8iB46V)3Aq*YK&;o{D!_YMhUB}S(82TAQzhGD&hV^II0ES(_ zuqzmLCBt4IHSA4>z0L4O3~#~k;}|}T;qw{3kl_zA{1C&R<*c@x)s3@yaMpF4wVtzX z;;eT#>l4oUj1ip~(VGz`GU7r;tYE}djCh6-uQK9w&Th!rIh@^6%Gnb+dj@CE;_O|V zeG6yb#@Rn|&S9K$1n2bToYOexbk4bqb5?TBb)548=e)@|Z*y)Q=XT`W&YZi9bC+}O z3eJ6!b6@1#ml+u_vKb?fWn>N`TcjA-fsvgU*@uyR8F>;TPhsR~j2yto5sW;Skp+w_ zWaKzT7BljCM($(eU5vbkk@qw5K}J5z$j2D@93x*~Y^7j31qUd&nSw_t zctT3S(-b^Q!3z|;Ou=guyh*`36ueKtM-+TT!8eRPhS3Q|hm3B^=x&Ve!RWIYJ(khq z7=0O|S2FrKM(<y^Ot|u@5r#Va7hi*k>5~JY!#C?5`9ZK~XkE$5E6^Q7ektQq+^8-V~id z(U}wtrf4Wd!=)6Bpy(WmiYb~*Q7J{!D4IdhEQ;n(bRI=1ims)ooTAkft)u8BiYh7E zMA24?c2KmNqJ0$ILD8KQJx9^&6un8&PmDX9aWTd!33{J;JF8r6iunGk0}Cnk(#!c|PTiwU1o zd^E*LitAC_km78Ln^BxYaW{(lQhXA{{V6_!;z1M_P&|g>@f1%aRXl~_=@ieTcn-zq zQ+yr8>nYw&@lJ~OQGAf%TPePi;=3rmm*Ph${(#~yDE^A#Zz=wPiPe~R3=?ZGu@)1v zm^gun)0nuJiRUr#0w!K0W#XkwT+YOmOuU|ntC+Z!iIq%zh>7nq@iQiV&%_^@_zM$% zW7089s==iCOlriWCQNG1q!vs%o=N#k>c*rVOzOp?K1}M%q?4I+Dw8HL>4FrKwlnEz zCVkK3224Jd$t{@NlF4nE+@2}7GUX1Y{DUd?Fy(%x{F5nym=DgUMfC5KZIr=%Js$52v}lG>CcC`nRM zpOQwDG@+z9B`qj9o|1e@+ECJ-l1`L#rKATXy(l@6l9MPom6Fpb8A!=sN`_G~f>g=5 zloU`>NXa-#iYb{)Nhu}MDVasdTuK&DvY3*klth$VK*`0FTt>+ilw3v0N=mM$WECZA zDY=o7G$k7;*+R*7N_J7Qmy!dN+#;pqc1rG~N2l-8iM7NuE~hAB$x 
zQQDBwY)YF^nnUSvl;%;|n$mWZcBHfmrQIp*NogNS`%!udrTr;AgVI5i4yE)gO3$Hm z6s2P*EuwS+rIRQvp>!IhGbx=zX=*;Dizq#h(lSbyQF;-jmr}Z%(km&whSKXOy@Ap- zl&+_=g3=9?Zl-h_r8_CzL+O4>Z>ID%O8-vjU6kHS=>wELMCqfHK0)czls-$U^aV;^ zrt~#R-=y>%O5dmSBT7G^^m9tTqV!uzf1vbdrXIo6I8$3PwJlTIGxc<)4rA(BOr62h zg-l({)C#6^jM7cz4lGdD4F3o{>O=CjOvo|(Tf>u=0T9mT8`%xc4|cFa1HS!XlrTxKn1 z)&`}~K#_Z+HUcv0G z%-+ZB1I#`|YW6G4evR2bGv^5A#F=v}b6PQ{4RZ!EX9RQ3Va_7vEMv}v%(;;{TbQ$r zIft0@3Ugj#&Tq{98*`6h?(xiR&)km89mCv7%$>sAGUi^&+{>lRy`8!DG4}!He!$!> znEMs;vY6M9d5xLZg?W9L*Oz&v%$v)+`OLeLc{ea`HS_i|?{?<>oq5kQ?+xa?#r$f_ zug&}{=I1lN6Z5+;{~YF59V+IJXZ|J3znb|gnSX%!cQXGj=D)=Jx0(Mg3$UOX3yx+% zYZi22K{pnh#ey*`7|Vk5S#UWEu3*7I7W{(+ceCIf7JR~j&sbQ4g&_;;vM_ZD3kR`q z2n**`b+9mEVFe4fv2X_qA7jDKD3e8OP3Ni9B!#f2=M%;FLjU&G=xEMCXr2U+|ii=SrkFD!|% z1ixIou$ProzBvkEWMni*Ru3_ zmfp$I2U+?MOFv@iS1hf%(rHlj@QYN{UC;ichLRedBDH{2L~0qS4WxFG+D+;iQm>MF zowBBs6u)a6cCweE*FpD;xrd;=i;4Q{23R2&&5A- zNe3=DflGRE$rW63y_8FC;F8z5D64il1rcD(ige(WiG43 zWevEj5tj|-vU9m?6ql8A*-c!Q=Cb#=>@zO=g3DWSd0Q@T&*k&DJjLY^m)}z*Ji_IV zvHTd8Cs-b`{0x?##qzURUcvHhEZ@QMZ&?0smjA{Tqqt%MS4`xJU0iVsSKP*m3|1V) ziep$Yj1>i}7{iL|Sh1cJH?iVNR{X?@)Gu7wmn#QwSV6Ej;q#l)lFRW7FT`DRiAKmYp(9X)!n%IGOk|9)z@+LYh3+4SAWPg`CQY9 zYr1gF3R2gUbImHQd4p@J03WmRcviM&Wk*&{VdZRA&SmADtbCA_4{>c>u5HS-&AE0I z*G}NtiCnvzYj5S++qw2TuKkVcGPv$!t~-TuaQ*#U{~+ZxDNj;fkMaV_iz%N(`F6?=P<}Jz-%|c7H{ga|+;A#4^yh|)xZz4} zxSAWD=7yKK;Z;^8S=A)Ps%ESzVpR#Nrm|``t8Qi0?W{h6)zw*Dlhq?xJ)YIYtlrG( zJ*?ix>d#pHJ*$6YO+D7MV9jx?Ii5AStm(pJi zfi;s@Q^J~QtXa>RhgtIjYg@2(1Z!urwv4ssv-U#PUc%bTS-XO@Y1VFM?M~M2W9>oK z-pbksSo=@bKE~Q7S$l}JFR=DyQfpsh?Y~(2D{Fsa-QQT3U|q<%2CQq$x~8ma!MfvF zm(RLRtQ*3*k*q6VT_Nknv2G6Q=Cf`g>z1%C#kxyacNyy*XWjFxdxv%JvF;PreJ*9) zSFHP%b-%FgH`X7<`WWkvVSOFecVYc7){kcWMAlDceJSgwvwjKdm$LqH)~{gw)vUjk z_2sNz&H8n$-^Ti#tlz`>eXKvg`kPsQE9+lRvHmA+Y{HGFaN|U7T+WTxabr0Z4X9{L zMN=w{rJ^Mjxm2{GqAe92sOU^ZH!4n`qBj+NsW_R6)2J9g#hFwLp<*}{XHzkfiqTYz zrK0Msz>0}fOrc^b6*H)qO~pJa7E-aK3Pi>ER9r~KB~)Ba#R@8}rs7&Es)(+pVjUGX zQBg_7CMvd4v4e`;RP3YTAQiV#aR(LupyD1<757u|Pbwav;&CdTqT(4Uo~Pm^Dqf}H 
z4JzKI;yo%pq~c#xd`88WRD46l_f-5u#lJ}-eK_ej>1w2pAzhPnZPE$SNz(O6HzM6c zO1e4e7Nn0Solm+A>Gq^Mk?u;m2kBmf8M*GZ?|BKes633y7?ppc@@Oh+P+5!0EGk1P>rvT|%4{l|QJF*Kaa86} z*_z6BRCc7Y3zgle>`AJ!50(9>JcY{sRGvZQAS#Dac@~xDP&taqF;q^Xatf8psl1NL zaw>OIc`KE-Q~4~FuTl918)9sz!G=sWv|>YNHgsjfa5ju)Lm?ZMNZD{98!l$UZZ_P? zhTGZj3LD;K!~1MJf{oSLSd)!i*?1xw`?2w2HeSWXYuNY?HvW^153})CHXX^PYHZ45 zQ%5#+X44opO=8m&HmzV&Ih$6c*z^#ao@UcCZ2F1KhqF1x=EiJp$>!tPd>WgFuz47p zXR~<;o0qbAEt@y8c{7_2vH2A?zs8mfwj9NlW7yJ~EnV2sjVGzg*0yYI&(@J_9naQcwyt1nIa^n;^&ngS z!PdLk`T<+NVCz?GtIoE7ZArFuVp}h^^&z!w4BIBLZ3^2iW!p7uyOwSH*meio?qu8R zZ2N$1AF=&#wjaax8f@>x_Fio7!}f7(FJ=2Qwy$7&IonsU{XVuo#`Y)J{w~`;W&7vs zXvB^dQg$52j?>vOj2&mOV*xuNb}VDZT6Sz?$7Xgs#g3QQ@d`V>X2&n=_?4Xrb~a*X zHaokqvoAYOV&_zL&SU2Sc3#cS)$Cl$&O6!pAUhvo=O-z4e#_49*_C8h6LvLY*9q)8 znO&!{YdpKAvTHiKRVc+HKyOw>|v+r*9J~F^YJodL@|1kC! zuzw8uBlcg${^jg{l>N`L|9L3~5*%p6fou-+=fF@74Clan4wP}=d=6~jz)lYA=D;Hy zc!mSdao}?f{J?>qIM{@P$8#``gJ*E?EDoN{!7>hB%E8MyxQm0gaPT$`zRJP(QXKq% zoBzhmwYa$sH+Sad-rRg5H($ifS90^!-25sxzsJoVa7#;WY0E9`xh2IdmvGBv-0}#w zJi{%|aceVf&Ex-T>Ar)+po_kNf4=FtlEftBQphDCxg>X~ce#LqqDT>>C_bok3r!Fn z5G+&=5kXOuA_$6rSP&4Ti4aNxp$7uFOCdd^U+ymDF1dd24)6Q!KfAL#`<dzfX`XwL=jJmn%qw7C z5%a#pyst9vYs|ZodG|5zFU+fFUNiId@_ZG~AHwsWnSKVBNlOP}JUukq4pymSUH-Nj3P;ibRw(jH#wN&jnzr1=0uRhDGFZ1d`UhUwuUcA z8~5_YPTpwajSk*Cj5oi`o5%6yt-Se1-n@r5n|ZT~H+!gg>u}yWj<-(Wt>5z2A9(9- z-dfCCD|l-qZ%^Ru3f`X1+t={+4ZQtJ-d@1lZ}Ijb77b(3{wykG(Wxvti$&+K=wTK; z#iIZ4PG8;`#5+TH=OW(uu^R7O&O1wZXBF?P;oWh(JC%1Uc=s~iy_$D_%DXS~?%TZk z4vPn~cqEHQv-lJif0xB)v-qzp{uhfMWAP3ax3G90@6F}CLwWBDymvM4{et&yB+?FwpFQM;D9qp3TVx)Z27k-C$p`zCd#Qg;S*-=^*?>dsZ8?tJPlr0!zsE~V~A z)Ll;9Rn+~Ix}Q;ZJ#{xycN29tQ+Eq>w^Mf~b$3&D4|VrZ_W*T&r|zHBJwn}M)ICAn z)71T!x_Q*SNZkVJ7E<>~{_EbN?j7phqpp^^Wz>B@-D>LAQTHKro2lDFT?=)s)U~rT zEbYzGLs@z(OOI#iS6TWsmY&AaGg$gpmOjqXCt3O*mj0Ke3t0LJ%l2bg%>gW%#Ih+Y zD`#01%c@y+A9*{@jkYnHvnvRalcW!Vartz_9omThMFc$Uv#`7D-K zvAmk)pJVywS^g82U(fOzSpF-P|C*ZRzi0WKEMLs>)hu7j@(nEC$npl3H?g9U6^F9o z3#>St6-Tn-1Xg^N6*sZs53IPG75A{>URFHFiicRSo)wL(*vX1LtZ3nbL-^omKKL>p 
z9IwU)U*Us4@WJ2s;2(VO5Fb3k2Q93`%3iGO%gX+&9Ky<>to#Nm&tl~{to$A;FJR@9 ztenrvms$BLD_>{TAXbfG)mT=IXVn3$n#QUbth$s{H8rgI8LO^m)eWrr9jk6-)d#HF z!m4eo+QF(uR_$X|8>#T?nm1YV zHfug+Z4qk+vG!A}9md-ISzF55vsilxYcFN(k68N?)?Uln>sb37YhR~k?VGG!#M*aR zyPUNjux*la-F6$0u-4|H*bJpF$y4zTH2kZX8y8Bu80P8;S-NL$UtlPo5 zM%L|PT^sACv;H%zKZNy%vi=LKKSqu9$Flxstp6?RZ(;rItiOZx_p<(e)~{gwX4Y?I zeLd?NSihI`t!$XdhC|r!IW~NO4PRu#v1~Y=4L@hYEo``r4R^5N4{W%f4G*wkH5<0G zVMh%cn%J<54ee~`2X79XC=hu`DFAMoL2Y{bT5HV$UvP&N){<5)J1W8*j3 zcs3i)W#jp5{5~6h$i^SD@g+7cV&l7Pe4mZAY+TL8nzd{yVbdfwO=eR$o2Id;noS3? z>3TNZ%BI`dbSIncV$)yP^j9|RV{Q6t+~bWjb5tvgIJQT)~zb*z!xZ{F*H{ zv*k{<+{M;|*m?w8k7DaFY(18(C$aS#Z2c`;|HRfmv-N(qKET#T*!szLpsicj+RWCy zY;9v}2itnFtqz_6@e3!nV`cb~fA2W!sZA;mB73`hQXXZ|a|<{y)^eME%Ruze@cZ)W1!A9rYWi z-$8vF^&RZ!#g4x07{HD~b_`-i2|Gr!V=OzyQ?p|tJEpVaKz1C?jw9J|0y|D*$4Tt? zCOb}L$64$+mmTM`<3e^^%#NGb@dP{Ovtt`OcC({{hR`sGh9NYJqG5j;#?f#94P`XU zq@j|AYBd@TqTyf~4yWNb8cw0%G#bvP;XE2Hpy47KE}`KH8m^|HhKB2C_yrBWrQt3b z9-!ghG(17WJQ`l4VF3*bX?UH6_h_i4VHph{(6E|@&7S}@(a=g`ZyNj3IEcni(KwvO z5*kO-IFZK5G?vpioyJ)-ewxN3Y5WR}r_*>QjpxyL0gV^YcnOV{(RekV%o!T5qwyCs z{*uO9Y5e0S`oGcm1dUJ8_yUddX?%so*Jyl`#w9c^rEvv~t7u$H<5n7X(b!GnM>O@P zseq~tm zv-3E1p2E)4*m<@ZJI`b11?;?totLol3U*%2&Kh=J$If4{^L}=|!Omsu>|s|wb`56N z{_HAc*A#YDuxkdpX0vM!yFSaV&$H``>^hQNN3-ihcAd(u^VoGhyDqI^*N@nBIlHc6 z*H77XBfD;5*Ujv@goImMW7j5j7qWXeyC<-F8oNKu?t|HVIJ=Kx_c81~j@@5j z_sQ%&jooLm`#bDDo82|+{s+6C{zP8G?k()z!|pEj3}8YX_EfT`nmu1(&yU%2J$wGep8v3CK6~C`&vN#Bz@87;vza~H*t3H@P3&o9PX{%7 zy4mwF&An(Yqns1@`cAD>``EHu;q51DL|C8oNXnstM<|k;LNAnvrFQIuI%^PU0r@4{lT{Jh- zypQIOXbCNSXz5Q&0WHI5*`Jm&TB>NNrsZ?Ae1Vq3X*r6PV`%vrEnlbQWLi$6r=nD0>fM?-A_%5_`YQ-v43m zH`sd$drxQYx7d3nd;h}Tf3tT5dpEFmCwn{C*N=Sz*!L;+4QF2o`$n^GEc+(2ubh3; z**A-QRn+V|o_#-L-*xPJn0-&NZvp$>X5R|-tz_Ru_HAL`cJ?)}ZzucO*w@Lv9$L}b zo7N&)N77nG>l9jN(>jON18MyXt)Hd!NLr7k^;lX@P^0xkT2H0*ELtz5^$J?AqV;-O zZ>04mT5qQH7FzG7^&VR9qxAt=|4!>;wEmaY1+>0P>wC0*KD(e@|W{zluQv^_&x%|hBG&!g|3k;=bbODFOX;Yg<7af-M90l^+(O6gblgeDeRMoP$KUDrCmoN_@iZMT)A2SP 
zE9h8B$3{A~(6OD4Pa27iopiL((Md-So#^aMXAzwv=p0Yy3_54g`Dr>2q4Q8W52N!4 zI*+IGt91Sko!_AI6gt04=f!khN#~7p{)*1q=)8l@yXgEAo%hmN^B|oM)A=Z!kJI@S zoiEV&8lAOtE~RrVog3)fMCVpI>*;K!b03}Ubav7C5ncW18ba47x+c>#m99#7zbqrm{QPcGmy1qu&X>^@K*Tr;QP1jH9x{=|Bmk4=>8Mk z_tO0ry8lM^gLFSm_X~8tQA77qx|h?vp6-owZ=riT-3@f_rMr#pPP%*OLC-*XhSD>R zo(c3+&@+Rc+4Rhz=RkTsPtOQE;q}Okx*YBj)ZPM!w>Gg#4nkT(pkY4kp*DKPiPI@huUMr>78tJu8 zdS4*DFO%NaO7H8W_kGg)0qOmB>HSaX{fP8_OnN^dy`Pre|CQeJq<77W(tCmQUMRg^ zm)>tl@2%3iUV1l5?_JWnS$gl2-tE%6OL~7KeIkAONT2@Fr$G7?OP?XqXRP!YCw&f( zK9i)+DbnX$>GM75^L^=ak@R^$`utn^JRvoGo|Zn(NS_Aj(=L5FrB9Fa`B?f^OW(t! z@8Qz-DCv8&^u0#<-Xwj0BYl4-eQ%Y%uS?%L>AOt&ejt5UNx#9;Z-41mD*Yx%zlqZC zWa)Q~^gB=bT_F7~RFi&xk$#U!zsIHDQ_}B0(r<_KYm<@MIbIEg5*Z z4E(JO{F4m)vkbgn2L4qB{zC>nBmH zfekXSNd|Vxz-}4%u@v-@g1%BPKngx3H3h?@V1yKml7g91Fk1>LrJ!004w8a{rQl0a z@MS4DP6|$ug43knOey$|6r3#u=SjgOQgEph{74Eemx7z6;CE7Rixk{01$Rop-BR!m zDR@W<9+84))udpa6uc+}3#4G76ud44?@7TDDOf56E2N-N3U*4tE-7f1f_+lZE`@!i zaDWsRNMVT-j*`OtrEr`S9w3EfQaDozXG`H6DLhaLkCnpXrSJqPJh4U!Pm;oKO5xd3 z_+u%&LJF^z!k>gRlfpNo@GU8PM+)DQ!dfX@CWY&y zaDxKMQuvV+MT+`JQGY3_87@U5q-c~B?Jq^+r04)Cnk7Y*QdBKP2T9Sv zQuH|~I!cO;mZD>&=maS`ON!2sqI0F_d?~t6iY}I-t3LskqMu38Z>8uKDY{*X?v$dt zrRW|hdRA(RUX!9Xr08uadRL0xm!di;+8{+6rD%&3ZI_}3DcUK;1yVdriuaS^ky1QH zif2o4l@!mB;sd4lGgACnDLzJukCWmPr1&%`K0}JXEyZU^@wrlbzM2$YD#bsP;-5(I zl~VjWDZW*TZ;@wi*B7^$Npm8$j2pM#&3_4i`omwMGUy{292q=F1`m1JilA+Bqbgv9;lcAk5v|EOLB*P}ku%l(zr84Xu8TNM> z_LvNNMuyFoVXw%rMKY{bhOLlc4Kl1rhPBCXGQ5|X4DTz$`^)g*GJJ##FO%U@W%x81 zK2wHQ%J6C#evk}5ScV@X!;h2UUyN;U~-R(`5LWGW`28{302Ci44C~hF>Ma zuaV(T%kY#%o_kUu0NJcy(Bc7EJFUp7oGGd{OcvD6!k`aq##8MftT1IS^5lu3pQ$}=4Nu;EY zl=PR9A}JXxB}1iTq?AmMk_ss~NJ>5CJWS5lem6A3o>6DSZ)nsImjNDH~mdMEIGIExTtdfy)W#p%2{IP=*wmFl`{Go8C_E& zqkk!*Z<5ik$msWE^b#4pR7NkC(d%XOhcc!_#!QkilVwb~jF~25s%6Z9GUjp_^9vbs zql~#r#{5af+$&@LB4hq0V;+<-56hTGWz6F;=4Bc4s*HI{n#$*JSKjGWHxb8GD|Ly+FoZBx5g;v6so%AIaFuW$djo_HQ!w zaT&Wo#>dNa@*9dY+VCBBhr~=})BeN-4cYN^g?V z-$?1LQu=!-{X>nE-Xo>=N$CSp`iPW1E2ZDyBJu9Utnr7NX$tCVh+ 
z(he!@meP-9TrU~dSH=yHafLE&kc|73j5|QamC3lNGH#lTnCslnfRtmTqF}0%fuxz zaj8sPArp7h$iya@xLYQ+$i!Be*dY_UW#Y#&sh3RZE0e~{q**fQaGCT?nRKp9`l(F1 zO(s1ilNQRPRWfOlOxh}ww#lU3GO0((`bb%Ulns-z5mHtnW#gsn04b{}ld`E&Rw-q3 zr0hT``;3$wC1poT+3`~LRVn))DLYxpPLr}TrR+OWcD9tAD`gi;*`-o;nUr1q38<7^ zD`huJ+3%$6PAR)v%I=Y}`=sn2QudIPJt8$_k4f2EQno=RlgY(0d9X|#DU-*@WmmC2XMs zGI^OyUM`bY$mE@BGPz48_sA47rMFD!CsPK>lo2vzq)ZtlQ%1{_DKe#8rhHna93oQ= zl_`hGlp|!ymt@M9Wy9%>e({&T$y^lOubO1UMy2Dm8sXu)LUih?K1UFnR>TOy+@|rCsQAi zsn5vNXJzX1GW8{y`m#)YRi-YIsUOJHRWfz0Ox++;H_6nkGPP!xOx-P0dt~azQr=6- z`%3u$DKC`rky1Wd%EwCicqyMK<&&j+j+D=p@`I)Pb5i~VDL+!mkCyUdrTkU@PiYlp?D-~aqiX)`rNU1nVD$bCK3#8&gskm4wE|rQONyX(- z@iVFTxtdhmBo#MH#Vt~CyHwmS6%RX%{99(cEi;zOj2$wwpUj*jGiS@p<7DP{WabZL=Fes3oig)DnfV`? z`K-)*UQK4cBr_Mt%x0O{B{O?uRv($wUuG4^tYVopLS{{nS(9Ye6qz+!W_?9wog}j^ zlUY~GtQwhhoy_{R%=)d&x>aWVNoM_1W<4yk9+g>-%dF>Q)_XE*sm%JM{?BTW*`+dj zqRc))W}har&yd;Y%Ix!H_JuP0VwruZ%)Uxy|5RrGOlDs%vu~8yx5(^!W%ffd`wf}B zTV{92>@J!8kyJ)1M@nUxR8En~Dyf_+m7kW%nnR@WP^mmjDvy)O6QuG)sXR$4zaf>U zNagpX@<&p6ja2?pD*qytk4fd@Qu&lrJ|mUSN#*lW`GHh!k;-jSxkDaGYPwWaN>#N~9VAspNYznNb*xmKAXO(y)iTgo@s8r39su!f{MX7p8surn9)w@#l zzEsso)pDs?DODS#YO_?;OI4#(?UJf)srpFf^p-jOWX?dDGf3uqO6ClgIi)gZip;5& zIS0y|LuAgOGUqUvbCk?EM&=wRbIz1G=gFMQWzOw2GUrj5^NP%QP3F8IbKaIY@5-F5 zQaw|g4RKspl#4DXdiS4ItHDB&cQ`N z-{9h)U(i3eB)Bvf5DX561QUY7peUFaObR9kQ-Z0%v|wg1H<%YJ2%_N1V0~~?up!tO zYzmT_gDt_$!7V{)P!?h=Yltbw}QV0 zZwG%1-VZ(qJ`O$!z6!n#PYX{E&j>4pmBT7wT39u#7AC?PVV$sH*epCV%nDnF?Zcko zS>f5?IpGCi-|*saV0cA1GRzN0g@wtmD4ZD13}=P2!|TH3;q~E)aAkNyxGG#7t_g1n zH-xu@rQx1%Z@4eqA07@L2painBD<}UR6g;uz{Oy~rnBOWpqjjtHofcj(zf-4H9gbvt zvEt;aqp!R5`PV*#m>n^Q3W?XV{+K10M)#>g_|Dw2SsVc=MKJQeg zIbD$cb^6c6-FBpZlm2b`chx(m|B(J;`cF~niYWDlD0Ov|+A>OA8>Oy`QrGWD|0OP- z5yo|cDb-G?@*B9Sq)w31bIk{@RXa7F{#(bt#)I{lz2+t)9U_K zD=D_7?i=0LOn9~Gsqu!twcS+VVnD5?@vP!bW$|Y5Go#dvQR=3$cy_#bl)5=e-BR*Y zkkYJHyLiV6Li>1!DD{>owX`hWDc(6sEsIjiec<1Wfgw*{_5w7_|PbIUzEE4 zKl}{Hi;wyrJ)`4eqSS*?>Y;{bU3G(sPs6j8P5HI(d$i7oPl!+aAG1mE$x-T&DD`N` 
z_hCxSfisG`?vBrl&x+5E&xy~C&x_BGFNiOUFN#ucjZ$xmQtyaT?}}3IiBkU*rQR2% z{y9p0FiL&6qJ!VU7X3D@$Nvc1l7EM-`qjUIxBmeyUo-Wc)@$aym6je~`5SoWAK;n_ z%Qa`cUNt?w?lC?wgpD}XMjPpj1pD}sb^u8nKPM%qEL!Fd|k|zWw6?ZE=ZAf&&pGt$&DD|!X z+dqO+io2Etrv|4*seg@9|Mt7YGlEJ%T5+ec(?*{*COYB%=!8F)oi;Y87M<`wl=^nV zv*u;|cNPahP~-o_uJtSX?+wqIoMB7_|99s>P%mg&+-+x&5!4SF1Py~mLE|7ZXcDFV zBT9WYN_{U%eLqV5AWHo(O8sY)`q9pyS#V~M6=Vm^gBC%{pjDLmag_Q^6iba_u_#tM ziq(%|*-@-V$*OuOHA@E7OZn*huEDv*-F5`sg6=_&pl5JaaCUG`l=?}O`e~H<`<3m z-LyPN2E``-zoC)|4O6<*x++*v+-+B|Bv=|O3$6~X39b#U3zi4hN3oMj?rNBlXt7K4 z;D%sTuv+tAZE&OJK}p4kVrfyVY80#fYdJlN)%>+kt0c8i%4^B;U~3escFS+Nwg)?g z{ae+*F*C}8?ZHly2lpE|aLkNfCp#z)_WZ{G^-34)FYa0z9Ef7^-$QY@qTxss3x1Es z8Nsc=?ZutSg5$w$Q7nvNiL%p{2X{s%JQu}k47hOo=m9h4UOM)7=lA{Fc~BlaP*M5a zPY^sDJYL+bJa{B{Gi zQLI4}YZ%2EMX|-iu;Q zN2FEh)9bv;Mi$K+GyUx8gQkrc^M{Oo1|O9?pPAA)`C0JAA68#Rv8+EhUkBe5ce~|3 zw0|F-P?3{A1V08p1wRMBgu;|4);x-}h+-|HSgR=3I*PT4Vr_SXCu$z1hB3`G4^NF^ z?S9qTK8kg!C{%>A+wcl)<4a~YN%=6D9tOpoO2c>*>rfhoQLN+tkMyusSo=R^oe|ax z>;GZhAc}SV_YlIyVUyx+rD0|i>rxsvjbdG|7(Z#om}vzgM~%7Uch(u2RQN{c>92kWC!%P3YfrbO3SnvPK!f` zkB7H~w}*FxcSf;MQEYS+8xzF}qS)9dHZF>d-x1y&-V^>Qyf?fryuZ>*QEWmKD~w`u zqS%5cHaUtdEV;RP%7Lm+hEIi0J2iYJd^UV8d_H`kWJHUU!KclPViTiSQ52h9a!-qt zZarTq9#I~?8on039>pd_v8hpP`g2YV-wfXh?;F}@^q5IA#utnqGcAfuiDHw7OdL6W zQj1Yjru@2Or<_`;ctmNUauiFJq_j?{ z-?3UEQQU1?qIx1d5l;k(Fp5P{?8+#%IEq~r#g=SKNl(;F)JjQD)F~bw#g;~~t1Eu5 zExDp~%7D`wCn{EbA~T9DE4j6GO7`j5i53+N&7;^gB_Fp=Y1p@IqW!7aU~`vrbMxuqS%I#zHL+LRGE>O z>D0um#B8Gza})DQ=Cn=eo3W`Pd3HpxtuG}OBo-zXC6Z1}6epsTeu;T0{S%8z-f5fC zKe;)IZH;2vi@g~BdqE_YCYDt!2$@l=Jc@1kZ8;R@{x-mO6W14yC{L_NtW4Yx#cqyb zw?whh-!u5W-`2tJ)&EQL|Enf$`V}qvV-tVAWK+wOQ~LgAs{fZ-qf`1-d=rZk<;5xn zv^|P#`(4NOikzDr#Www#PoQ!Ul_yhq8kJ{Kc@CB5Q+W}U zBPw4-<&9Lnjmpna`7J7cO_gR;=}MIxs?4Iw4OH1dm7P>MMwR1KxtA*UQ{@4wJVcd8 zsPZfwsyt7Xm#FdzRbD48Agw29IixKiZ8d3YN!v!+4$^j!wx^_0$CNPn5NVH)_84hT zkoFX5&yw~6X)lxZDrxVL_9baQkoFU4zfd)Ws;5ykovHy<3#dAos`IG2n5s8YbrV&$ zQFT96k5lz-s@_l4N2vNDRbM7q^%bhVM%8zy`YzQvQ>{1EE}+_lRO?H%!Bo4PYBy7D 
zFV*%_?I6_-Q|(Tw-A(n=sh*&EO{&+XdR?kFp?Wi_&!Bol^~F?QLiJ@-zk%wjB~||` z)jy{Cr&Rx(>R(d*N2>o!dVkV~kv^RCJks+?FC={;=?6)_oAf`C{ut>`kp2|u&yfBc z=`WD}7U^%3{&&*Kfw`# zV+6+u?j?AX;4cJk5xht68DWgD5@9pKHiYd6I}&y#ypV7Z;c&tUghhmt2&WKEBm6Vr z6NIl2zD6?q8{s>I?-IUG_$lG%gkKW=i|{8BCy{7IqBV(&NnA!^2#FjLxg;i#C?YYJ z#C#G9NhC=`Bo>ocLSi+EwItS&SWjXDiA~x{Y$5RgiN8|gWNI|0#>Lc_MveK@SV)c4 z)L2W6b<|j2a%q>8ddUOSI7E#{sqq{&UZBP&)cA%P-%&G8&AQagpyv71yo8zqs5zOM zbEr9wnm1B&3pH<{=9|=fkD4D)>r863rB-`t&7jsoY9*<)fm-F%+D5Y0PHOF@))8tQ zqt?^ZdX`$xQ|onVy-lqTseKx?t5Ul)wa=&aAZlMm?aQe>gxX`MJ(k*2s6CC^GpId_ z+H#4JWI-97oi#mI#bC5cRsdJP%w^Qd%>fBA8$Eousb>5@SSJe5MIzLe7C+hq{-IJ2k zjZya$>Q<$0P3qRBZawPOr*0eSwxezb>UN@T7wTS5-F4J0r|vfD?xyZu>h7oR5$YbJ z?s4k=iMkI{_igHaLfy}(`#E*Lpzbf!OQBx!4C+;;UK;hPQ7@f(0rl!ruOamsQ?Cj2 zno%!{dO6fvO1)#$dxwm4GFp+*os6DjoI^%0GR`NX4;i^+6p%5FjEQ7SCSxiYi^wP@ znX!b7Wn^4K#tJfSAY(Ndw~%p!jJwEql#Iv8c!rGU$asN_SIBscj5o-5m-;7A|8(kC zqJA3nt5Lrp^&3;a3H6&%Ka2X)slR~w*HHgDN%dD#e=YUbQGY%4Z>D}J^~cuV_@^^jjKzPop1c^fQf5rSa)B zu0-Q1G_FeHbQ<>}*?2OIx6t@W8vjIQ4Kg#yY)WPpna#;;MP?f^dz0Cp%uC69-G;L1P zmNacm)3!8iPt%Sx9Y)hRG`*Ro57P9XG&`MU=`;&y7Sb$(W(_!V7iXTjhcowa=7Exl zXQe!z49KcMR&BECk=1~##$+`mD~qfaWVI%%9a$a8>Oxj`vd$vwT(WwT)rYLUWc4R& z09k{{8bVeMS;NU1N!Dnx#*#IGtchezA!|CxtXX8uC2Ij$NwTgaYYACblXV?gE67?! 
z)>^Vk$l5^GX0mP}tDLOuWbGnrFIfl3I!x9vvTh^mPO|PH>prp`AnReW9wX~XvYsL9 zc}ZC>k@X5$uaor_S$`wzA7s5x)<4PmgsjiW`iiV?$oihFpU6h`iDbvfK9%e<$gV``PFkUgI4BC;owJ&o*{WX~acKG}=Nj>x`>>}6zMOZN3--#~V9 z4cY6+zKQHjWZz778QI&&-bwZzviFmHi0q?eA1C_`vj0r>gJi!)_Ge^&LG#mSUY+J~ znzy8RCz^Mmc^=Kj)4Y)8*U)@5&DYZWAj#&p)BH}FzfAML()@3pq9eUDX2pvY!VH6#f)8R%sl+fV-9d4t; z9dxL;*?g4_uhZdcI{ZS%6goDO)Ugd6+tKl2I$lP{A#|Ke$1CZ06&-id@em!4(D4O2 zzCp*g==dF-PN36CbjqMpGdg9_sXv{D&}k^0X3;50r-)8vblO9weRO(}PH)obuSq(e zNar)?T$#=-=-iRco#{N5&Qs_-jn3=nd^4R(>3koZAEWaVbpC+OU(oq0x-_Os3%azT z%Vl&KL6?zqSxuLXblFUoKhxz2x;#af&*_rG@()ClieonXAbZbnvCUiTWZkNz)0NobQZ3*3$(d_`;Zll{BbbE_# z@6+u=lHJqjUW4wn=-!>~z3F}d-Ivk*2D-1N`?GX^mF}<8BSw!Z^r%XYw)E&mj~?{M zqsMr96w+fUJyz0V6+QOQ<0w6DrNS`2anir03K0{F0tO((`A|I)}3^;;f4~YdvR`a#lHKy~9}_bJnMv-IcR@ zadvOcUdq`kIeQgnzm??d_c{AR&gsB8JvrxW&RNMh>p5ow=lp|nKH;3tI5(4XTXJq| z&Yi-!b2)cD=ibe^4{`1z^g5AVXV9xMy#~-LhhDk#+D5Pa^g2kdPw4dxy^`N?UT4lb zhx2-IUc`CVa^7;zdyMm*=e!r`9nw34-VNwIl-{H0J%-+!>AjuaJL&y4y+5S)N1UI{ z`RzHsBj-=${CS+efb;L?{Kq-}NiL{N@`8{HYH-0IF39DA5nQm33vT6t+qmFo`kYLk zQ|Z&6K11j;ls-G@bBI1i=<_LkzNOFiT-cTiyK!L;E}Y7R^SE#U7ar%rd%5s_E=uL1 z%3PEt>7om`=u$2k$VKb8=w>b|<)XK_=tD00h`!nMZBO5h^qolGS@fMl-}~tM7=54M z;&d*q!^QQucmWqL;o@an{1g|z%*C(JuRi_Gq+d4uX3%e8l71E6WAwX+e)rP9D*bEH zzc&3x(Z7iPljwgN{qLjypSk1=E{StV$R#;kGMY;Yxa1g@+`}dJa%l>ep2nqTaA|)o z9m1tUxpX&|9^uksT$=oe0Wk)g!hnkza2W%JFkn9ejx*qP2K>UnQy6#}121IYKn4zG z;I#}~!@wIE_yz;tW#Ib^>dc^X7}SeF3mCM7LCYBQG=u)apjR0jFt{Gc!Sxwjz~IRY zp330+82lK6pWw1)T-Jum+Hu+ST(*wO)^piwT=ou^z02h-xx5pXcj5AdT)vdcujcZ{ zx%>q#e~BS+hSX(9217tv$Q7Nq;%u%smn*L1itD)I zdaihyEB?Y2uQId|Lz^?SB}3;k^eTofW$42UeTJdWkyDwRTIAFrr!G15$jK(B1v#C` z>6#>`J2^ecIh&kb|Xd^8h(dlJhh< zFOc&xIj@rQ9yuS7^9ebhk@E#P-;nbiIX^NinaZ$g3`=KN4TjZXSRIBnU|1uDHDOpQ zhV^CGV1`}Ju%QggW!NN!O=Z}0hRtHwT!uZ!u%{UI8pGaT*gFh+k6|A$>?4MK$*`{( z_8r51B$;~>xy{IJOKyL1hmbpz++1?=$el>;WO5gfyNKLka<3%!Dsq>Sdo{T?lDnSV zjpS}7_hxcS$t@>$FS+~4{WG}_k^3mQPmudGxz9<;eUaS1koy|BZ<6~qxgV1I5yMYn zcqN8cVR$o!w_$iYhF`+)D;S=`@GBX99mB6@_-=+DVfZnIKf~}>82%a~YA~V!BN{Pc 
z3?n8nVhSS;BpGoVBko|ti;Q@a5r1XG+l=^x5uY*Qb4Gl@h+oJ{A@2d={~+%@@;)N(6Y@SI?_cD7L*Dm{Jdu(07}=DOXEL%m zBU>@@Y)1BCWN(rq`!KREBR4W~Dl* zix_niqskbyl~I3V)IS;ZF{4{Bx+9}IGkOlABStS~ z^cF_%VDv6V?`HIIM&HipI~aW@qaR`PV~l>0(SKp|Ym9!A(Qh;Q9Y!ZVW%L(}{uiUa zW%Lh>{+Tf?7;^<$!P>q5(1qljjQBaqHHWYNFpgRTUP;edv7f>*m zg3BowM!^UQ@+lZg!FURaNEXba;Cc$yQBY37HVSrAu#bX+6da@AI0bi5@Mj9%qTqcB zKBVAd3O-|O3S&=VY>cs|GWHC{<}$W`vC|nlld%gJyNIz7W3OWDb&OphY3wS-u4U{- z#@@@=*BSdRV?Sl==ZyV|vEMN6B*w)USDkSI<7zOjHsk6st^wm3Gp-He+A*#p<2p00 zE8}`F?kvXTF>V3l${F`4<3444dXn*V7+;U^8H{hn_$($IWWo_9+{%R8nQ#{q{=|g) zneZSJ9$~`cOn8b3&obc!CcMmqSDEkz6aLDCzcb-oCVaqzkC^Z&6TV==znJhX6MkSq z@@EQDC_IV6lPNrn!b%jTQCOYAfWjIS)~2u?g$*cdOkqx1ids?BmZA<6b*88r zMLj7xhobW+x`3jKDC$Sir4$XK=yHmNQj|+k9z~-lDxhdQMMV@%rf3>PGbx%wQF1;- ziztdHx{9J@6kSWv^%UJe(He@@QFIeUn<%=OqB4rMQM8kyJrwPy=nzFmDLPKk9TeS7 z(Y+M?nWBd%dX%CkD0-Tr=O}uSqQ8(VdX1ttDSDftcPM&~q7Nzhn4-@p`jVm_Df*d- z8BA=(#4IMB%f!A+?8n4$Oq|NZ=}f$diDgXO%EbGb_&5`vWa0-*{DO&JF)58nHJDUO z(xh{l)R#&9m^7J5bC@)bN$Z(Z%A|58J;|h(nDiGW*J5%bCTB8v0F!f=oXh0ZOy0=k z%}l2nc9`91x%gH)TvCpk*QmldJ9wk%+x2C`V>>YVH&2Lz_fZy zYs$1UnU*}CX_qi<0Mn*3Z2{93G3`dCZDHCiOuLI|4>IjxrhUq^Z<+Qz(;F~7i|Ngo z-iPS}m_CT<^O=4X)0Z;6lM$dd8BLkdn;HF? 
zaVayVF=IY67BZuR8MiQ_j2VwH<9TMh$c%58iJ2!bvjH=+nAx0}moqbunfc6|!_0`8 zi~xfe2b5_6NYnLC%cH!-)2xm%fgA9EjL?i0-YiFq;Rox;2p%=KaL{81qkIejDa@Wqx<&4`KdD=8qyd|61m+ zVg8NGzlZq`GyhTMf5rR?2fwhO9t)bX;7k?_VnHqoMzCNR3vOV+Y8D)4!M!ZFp9Sx; z;Byvy$-+1b>#{I|g(Fxvj)fCgxQm5{B`rM4!Z%s?9t%HU(djHoXHmeSPAoc`Mdz|; zIE%)zXgrG+vuHVsRyWIt%RPtWMIP)U`%T?{UY9UuG<*KW>>LIRrnya2=NmZ8AWJzt7T)~ojmW*ae8B6xC zWFJdDCAs8VmVD3BHZ1MR((Wvs$Lpygf~#-f>X*3sEv|lB(lt%ErWMz;;hJKuxrS@5 zu%?|`?>A`mY>A(N=cSiVflG1@5k~>Sbh`B%UHgZj!atF4vFX z`t4kQfa?!&{dcT5ffXmQqAx23v*L1AT+fPitXR*A4_NU9E52f7S6237Wp7p{Z(!w3 ztlY@TcUbu`D?jCiw%pK-8+vfV_1v(I8`g8fE8OrmZg_`PO<2{6Rc%;xC9AGu)%C1; zkyUT9>aVPB%<2}bZpG@QtX|3LRjhuQ)qf?q`fsdhz?v-9G-pjdYYJI2ku}Fya}R6o zWo=c~)?{sM)(&EAE^9}yb_Z(@vi2}*KVj`Rto@D~dvN3V+}MX3Z{o%>ZrsYc6xN-_ zx-(ceM$)=Ttee8RBdoiNb@xzmG9_u0RHI}tCBrGnqokaYy_D>y9%1xuVsp9&7(++Mr$W4d2=|?uCvf*Sl z3}nMFHVkLO&1~4ohTUvk-aV|O;5#l~~l*qe=g z*w~kim$Pvw8*|y1%wyvyHWsjP9UJds<7aHD$EN;lDrD0fHqB$xLN*n%X)&9YvS|aG z%GtDyO}p8&k4*>JbT^y+#HI(>^e~$qW7E@YdX7ymvgz+^`kGDOklc*TmDpT`&FO3o z*<6#&_1N5i&5haIg3TAQc_5nyvv~-cbJ$$O=E-cH%H|ntp3UZBHebo+2ig1-n_pw| z8*F}u&F``KLpFcR=C9cNjik-rv-u~soWz#X+0u$Fec5suTSl;DBwI$aWh`4}uw@oo z7PDn3TdrZta<;5w%WAgV$d)p;Y-7t#w(MrhUbgIK%R#oh#Fj6(xdu1)cgVMiJ`gcm-rSt!0?Lw}(g|gQvdyBHaRS5Y9W$#n=Ps%=_>~qS#qU;;WzNhRb%29qIwzs$<`m)mddu1+18qEUD(!*ZArFW&9-aV zb}QTd#J2m`_BGpnVS5VO8?n7P+gq~za<=EOJ)iAM*}jtPtJr=o+aG27<81#T$@U-E z{u4Vwc4V-l0XxoS$A#?Z%Z@^J%wWeXcC2N`W_H}nj>p*XJUd=w$9L>Jft@F@vk5y} zv9k?3bJ#hWodxV%%+BTPT*1yg>^#cO~NvHLA{zs>F+*mDwlV(e+bo{sG4%$^+fjAl;(dzP|iC3{w} z=T7!Kz@CTL^E!L}!JhZn8?d(?d+W3JT=w>5Z$C+Ur?7V}d*`!vEqgb!_h$A!!ro`u z`#gL9$=f_-h+*Oh(U**AuLlh`+feb=yWHT%}G?-={;Vc)&%dxw1= zv+q;(r?J0AlKr*V-=6(F*nbxLhp~SQ`^U2XI`*$+|2p;`X8)b+znlGUvj08yf53sN z9H_~G+8pS~feScrAqOUKU^)k8a-f_8dpWS51J7~bH4ePNfr|Hk2V)#Og@cVb*n)$t zI5?7n6F6AJ!POkx$idAVe2RlFbMO@o{=%VCICL6^nscZFhdOa+B!?z&sE9-BICL|I zN;z~FhaTk6!yNjALlxKmL*H?@0f)0V+?>NhI6RWWqd2^n!^=6mg2T6Q_&yH*nZy6& z@K+rEnj^J1(ugCO9O=uE!5q1qBXc=&B}cB}$e%g#1V^6YXeEvY98GZa97#tn;^@U3 
zoy^fW9G%C}O&s0E(H$JUpQDd+^hu7s$I;I?`US^oaI68x8gcAAj`ic%B^;Z>v4~@f zIkuf+2RL?!V=r**4UWCVt+@46ZaqE8tu48=6SsEZ)w;$*B@3`Xx?l_4%dUD4F+;Jgytl*9k?zo9N-sX-Ex#J`LpO)@B zIt;S<1ODgRl#YxRI+D(GrcKjyBrQV_ML<*pS%M-%L{StMiW>++93X-S$WU3r4-jym z3|Xa=4TUCMbkm(C=}6k_(D(4Z@BQcAbDn#j`+feo=iK|;d+sw;JTs4H4(6F3@yz8s za|O>l$1|_+%xgS5fM_I$x8qc21v*%Lx?5#X|7th|qv+wcjMxNcwbK`k#8qZbo z+=V=MG0**w=N{#`CwT5Dp8J632lD)2o|()87R+bCAuRYF3x3LipR?dm7Cga%r&!R&f?gJU zz>A;Y#p8ML1YZ0lFJ8}!H}GOTFRtdrwJaRZ!f7n5WZ{J@yqJYQWZ^?~Ec`bM|HHyQ zUK+qlgLvs=UOJPP&f=w~c+Dkt{lgMPFypg)I6fi=JlDGc4M}qHY%T z^74tidv;8MUcHr9xAN*PUfsiMAK|s5dF@zU`z^2C#A`S6+74c8+kUT65bfi8OVsLN2&if z^+!`*_ettMP5o!7{~YzFQ2%-Azd-$$s6U7L^QiwS^G~7+YeKb5k!$UMYLc>33_%{vzrQs@WzKu-lpMQ8X9O= zPQ!aNtf8Tih9(+z(y*I`E*g4RdI(F8Vd*DX`e~LP&(hOadOAz*Wa)z}eVC<>vh*J; zeTt?3V_6By#;|N0%O@rKW5oaS#~AMu4dVDEPI1xZ?Wth zmMvk~DweHb`6!lGuzWhpt5{ym@`G7^D9gXc@=IBMIm@qP`PD4{9m{{u@>f{i@L{oY z8Ov9)d=<+#v3v_FX0YNARvgBPBUo`HD~@Bur&)0oD{f%LA6RiSD{f`QU97l=6)RY= znH5`E(a4G>R?cJPQLH?gm7iqgr&xIdEC0;OzpAtH9#-DR$`6%0S=r6XK34AMy+OQJ z$a|mTy|Z}l9Ns&R_s-|Nhk5Tw-g}z&p5?vgSv7!F!&xshstRhwDW!m3@Yp2O;ovif7J{sgO!Vf6{D{v4~X zVf9U{zM0j3V)gB;zL(X1WAzSJcd)vP)xE6#fHebIGnh3;vF1e9)SbkdQ(5zQ)||td zFSF)8*8Gz-|7Oj9S@R@o7O-X^YdTolpS1&6JD9aatR2DHQr4cu+6!6xE!KXAwclgy zPg(nO*8YpN&$D&`YhPmR%dA~Y-P(6qH=1?RSXarqnXH?|x)*r(9!&v_b)*r+ApRxW|tpANV z>#t+|^{l^@^|!J99oDa5{W{ja&-zWQZ)W{YHcVr~JT@H6hQru!I2(>(!zbDB3pV_Q z4cD^adN$m^hTGV12OApLuz?L5*|3ET+t{$14Ik!z@1MZ?XY&48ynin5pU3;(#jAzRPwoGQr zRJP1w%N({`%$Cd9as^wiX3I5f`8`{1Wa}KZew3{rW9ui_dJJ1nVC(1D`YX2nk*&9| z^)|NN!PfiOdOus&vULYro7uWcovnM=+RfHpwtbv!C$Q~vY&(T*r?Ks9ww=qiKeFv^ zw%yCNzq9Qzw*8B3|6$vcY+Jy#m)Q0S+g@keTWs6L_91K^$M$2{ej?ky#P+k;{x!B= zT*vkwvi(xFe<)wY_G{Sw8@B&}?KiXiPi((~?SEnWQ*2+)_7)mP(pXMoC5>}v{3wkd zqw!NT9#7+mG@eZ3X*8ZqvLt{6MeeA%F0(J~w$53_*W5-B#jAF-Fc1&f* zY<3($-Hs#KaU457!;TZ!aS}UDWye|UIF}t?VaM0l@eOud#g2#A@gzIevtv6u_OPRi zrU5h!qUj)-M$lA7(-@k@(=?5y88lVXG>4{InvS69Saq6Cq3JZ5&Zg3N!7rRfct-lpkYni^+TcGA>CQ(qm;185#hb1}_BX&y%NSenaeo<#Fhny1q|m*yjB{uIrhr}+$;zfAM_ 
zG+#jTg*0D8^CdL@gyx^qd>PGG(EMAPZ=(6nG(SZ1-)a62&2>-G{6CtXqj>?%uhaY% z&F|1$PxCUG*U`L<=Djqx)4ZRSezY7w%OF~cXc<9E87*UI8BfbZT4vBvOUuV;`2;P; z({ds$C)08oEoV^I@?~1ir{w}#E~MomS}vjGGFq;s<#)8)Ld$Km+)2w{X}O1%hiQ3~ zmVeUnI4w`mvV@jSb`D_YY<3>X&STkm3Oi3@=h^K1GCR*_=LPJ%P@SC@v-1*m{)C-B zXXj>9|f5$q~u*93M=W>*EfD%mxYUGv#>D7y}4*OBZxid~;! z*QxCKGP}OQu5YpHJM8*CyMD;7AJ_5WNZ_uk*mVuNe#5To*!2Lr-e%Wob{DX_nBAk< zJ%!zK*^_;@r?LACcAv@av)TO?vZ;5cV9zo)PRRW6wnPOkvM7_RL^UHG4kAp6{~fQuh3fJ&&^IN%p+Rp2h5W zmp$*XXAOJSvu7iFwyd-kwrFMB%J(@kpub*&||meD$u)@iiPp|zIQgK0gC)+1;= zmex(#XWiq_xK`a4>0p!GId@1*swwBAGO-)Mc5 z*8kDENS)Rtv^LPXhSv48ZlrY!t=nncL+f5zJ81n7aUX31Xd6n~DB3D$n@-zY+UC)A z2yGvs?W44PlD1FN_F39KN82g1eTlZO()MlIen#6b>S+5VZNH}NTH3Cs?MB*er|r+Q z-9_8IwB1kJW3)X<+l#a7G`x*A0z}}PCdn$Xs!`_?OdoO$6U~dC^-)C<#d%M`% z!@mCP8_2#w_LZ>jK=zGh-#GS7VBci+Rj}_U_I-nWKVsissN44d`yOZCbL@MAeQ&XE z8T(eUZ#DbYvG0BM?Ox(_TSNd1MN4_ehcll(S8r@57YiQ z?F(pMNc)?#FQ$D7?MrE2LHh>UH_^V8_D0&9Y2Qaj=qRM4jE)cMza5k5sGy^gj+u1K zrsGgL4yWTtI*y{_7&=a%<8(UCr{imMe20$j)A2((eoV*D=(viGYv}k59oNzEdpiC^ z$K7;1M8^|!JVnQgbS$Ey?o~S8pyO>imecVb9c$=VPsc_&n&{}D6P+b=4yAJxonz@N zr*jgWQ|X*V=Uh7H(Rm1+AEEPTI!~hWOLTsN&TrEB13G_1=TGVUumh&^ayoxa=e2ZR zPv?zv{*lf<)AjJudK-VR7T}IaxbX`r?uju+M zT{qEn3thL-bthearR)FbdYrCj>3W^6H|bhR*9y8;(Y2PY4Rkfq)lAoivA?U8t`8A^ zK=&ZJhtWNO?n!i4(LIaqxpdE?`w+T6PWRDtf0FJ`)BRbxKTr2p=>9g{Kco8>bpMj> zU(0U(lD|Ej@cRk(9=w3G?iAKcnYTdg|!8ik@re`8_>3xOX*XUhB@6tMY zSJ1nP-nH~@rMHpZW_owg+e&W_ef{Y>h`!_(|06& z$I*8ZeP`15b^0!(?_&Bcq3Z5{{~Y^I zX8)Pg?LUkC+t|OC{q5}UVt+6D_ltg_Tuc`;M75YBjuK~zv&EOi`QmHhJL1RU7V&5C zj94Ppi49_t*eV)Dvt*FEDN#C^_PAFrC*`+8!P?FrQam!H&yyglYU>7e&3dUKa+mHkbalfNxv(l-!G-#uchC8 z((e)J_n7qim-PFO^m|$Qy(azMlzxk)-@8(9wiH|-1wW92OQhfrQgE{r{7DM#kb=KR z!QE1DpA?t|2fkC%hLa5>3@&(zfbx< zApIYd{%fWG4yo(kEd6&$|2=ZRG&x|N9B{B4aF`r$xE%03Ip9(`;BqD z!2ijBH8P-41~kcloibpz46KlWwK8zN3_Mf@enbX-R|ft<23{ruuaJRP$-pON;366L ziVS>R2EHkS$eSWN#GU!wpbh-@sq6|7q z2AwN|elCNqmOtxW~GU#y`^qdTOK?W_5K`+UmS7gxZGHAIB+98AbWH1@r zPX-ss;4&FJP6k)X;2IfxybL}?2G@N{245kAub06$$l#k~@GUaK}9vQq>26xEdZYeB~!ULpm 
zkQ5e4;Y6t`oGgV?q;Q%P&XB@tDg1~OepCuSCWW7s!V{(NWGOsN3eS+jGo|o+Dg2rg zenSesC53fTc%>9xC56{W;cukyIw`zO3h$J{zewT3Quv4z{zD4?ErtJ8m%^u{@I@(n zNeW+)!q=s6ofK}6!uO?cvlMQV!W~lBA%!2lq*satOHq*&4UwXQq-cZ`l}XV=DVi)r z6;f0wMMp}}$ED~fDLO`qj+3I#NYNQmbYYzoT_i={m7?!U(a)slN-6rK6#ZI?Zj_?G zO40pN^oSHaEk(~r(eqODq7*HXqF1HpT`8)UqGeLFQi@unXqOc2mZCN(YL}ueDIOrj zgQU1fiib#X-9b`3LW(Cz@f0bZCdD(PxLS(mNbzA(e7F=JDaA)g@##|h1u6cb6rUx< z=SuNcr1-m1e6bY&P>QdR;;W?i8Y%vb6kjLBzn9{NrT9rHep-s3mEsqqu6UsozbwV| zQoK}(S4iF~bN)C{cfl^W^B_&caSxTl#NrjYDO36$qnJpzpNXf^f7wNi4ul-waDf0mNF zq~u;HxnD{il#(Z;WPy~tEhX*Bsp-Z z95`JLtdaxg$$^Kp$vOjhP@_r!`_r(i)Gjn8Mai0y)VNy z%do97tWkzF%dnj?Y_|*_DZ>w!;a`*Czn9^ElHqsD@CRl1zhwA-WcYJ3e31-)U50Ov z;TvUmlMHW>;UC`7D#Q27h{%Wo)n&wR8Br=DM$3qCGGc;^m@FeIWW;R_Ue9kkYH9^g1cMSxWDb()* z)lxP`%H~Vip;C6ZlpP~wCrH_uQg)V8*3 zDZ5R|{vu^}OWA!=_MnvgUCJJlvL~f%ft0;1W$#GYQYl*@WvirYy_9W~vMo}!Q_4DI z6dBb|MwQ8^G3qjEyo{PCqo&BHX)tyt& zW%Thf`ZO7Rx{SV7M*m4h-!7y7ETjJ_qyH|WAC=Mnl+pi|(a*@}=VWxBj46~c#WH57 zj5$cgjFB8M{`-Hp{pJWn8t4J4?o0CgX0GajRt9 zUK!UXdGQLvA&y?}AWqgf{pD*JNmGOtm_#^({GBrXO&PyV%1fktqLj~;@6=xEt77NNw>(P+ho$6GU=}} z=^mN%H<|Q*GU*AK^ps3`MkYNklU|fbi)7NPGU*MOv|A>RkjWpB$!E#rYh?2MGWji; zyi_KymB~BQWpay5ZkH*MDFbB6K{92yOc^IrCdiaZnetJYa*Ry*f=oGIrd%UaZjvcC z%as3TH>Mv`jrkrhZeVUM5p-m#L4*)R$%IVwt)`rq;{U)iQOvOx-0@ z_sP@`q#{z$Pb!L}qC_eVl#1a}F-|JVrDBp))J>I&8mX8k6^BX15mNCnsW?U|j+2Vd zNW}?KagtP=A{AefigTpmT&ehmRD4S+E|Q9$O2yBm;!3IbrBwV{Dz24^KS;&RQgN$P z+%6UWk%~8^qDd+`WmrYGOe>IS17zA@nO6L#8Gp#wxN{aX?taQWO{*2?=RC2 zkm1W9FGiCbO zGW{Z%exppkSEfHL(^ty$Ua6cZl}AhENm6-=RDND6zaW)glFDTp*P%O66jyd{-(Pq;k1b zz9*Gyq_R;eo20T?DqEzoTPk~HMuE&2ATtKbjAEHFTxOKYjL~&6W1P%5T4r1%Gj5a_ ze~}q?$&7nt#{DwmL7DM)nemj&_@B&pMrJ%KGa6*Z2AQ!@W^9oe+hsyIs#GnNs^wC(O{#WC zRf|;Zmb$7oscM(157G8Y)qa_Ipv)XDGfQRWD497%W{#Jc6J_QUnK?~nene)TDl^ZM znHR~-D`n>2Wad*c^Cg-2uFR~LnagD6N}0J@X0DU!0a86&sz*xoD0QhGE7j#vJxQvk zN%cIbK3J+hD%BsC>Z7IllTv+xRG%c(r%3hZrTPk~zD=tCDb-I)^)piayi~s^)r+P2 zU8!C#)f=UHi&Sry>L#h)Db;(VdaukXsFPU(WY%DrRV=fH%B*2BYoyE?C9^7I)=@I+ 
z1etZ7%(_%&-669ckXZ|5)(V-mLuPf!>|&W+DYIwF?AbEAMrO~K*@wvN3uX4jGW&-z z`{y$IGMRma%&xmyX8%rR|4C-wA+!G?vmcPz|B%^>Wp;ziZk5?RGW!FW6PYte<{Tt* zM#!8pnKMb|%$7NGWzIaAbBN6OxXd|S=6p%!d|&4LOy>Me<~$^Gmdc#>WzHt4o6{_F zcFCMpnX^ykbjsYw-2O6mpv*0lxg|2UROU{Uxm7avV3~Wa%>9PU{ie+Qw#@yW%>9+j z{iDpiMdscubMKS656IkyWbPv}_a8F%e=_%3nfrpwU8pW|7s=dLW$t@2cazL*lDX|t zGgNBENlm%bOp=cPiiiank%K|YN`2^)cjUz zekV0INX>0hbBEO2B{la-&HZ&!^KYs7kJLOZHP1@T3sSR4YF?F^H>75z)Vwb>JEdl~ z)a;X*PO0gUT2fmewF9JfnADDy+DfUdlG>S4TP?L8k=i4q_G41}38_6+YCk2lCrfSJ zsZ#p|sr{1Fo+Gv2lG<-e?Zr}iiPZi?YA^jzQEIP}+TTg-EmC{0)c#FsAClTfr1l?D z`yZ)&QfmJvwQou7GO67v^CI)c%DlNUZ=TFMSmqrj^Nx_Zd7qbgKaqJ?%e-r4-mNn4 zE}3_)%)4LaJt*@YmU%D8yoECFWtsPy%zIPjy)E;W%e<8`Z@tXhDD$?+yzMe?kIdUE z^EzZcnO`9D2gv-PGJlxNA1U)EsLT9nnLk_R*U0?&GXF%Gf0E2UMdp8A=6^xvpC|Lr zm-!dS{0n9N#WMd+nSZy;e?#Us$owvu|A8DlRSrHx4n9H-{>1;gyF>qe{~sUqpZ|~l K|CED|`F{Y1iq(Yx diff --git a/main.cpp b/main.cpp index 23916d60b0..c03f08025e 100644 --- a/main.cpp +++ b/main.cpp @@ -466,6 +466,7 @@ void update_pos(float frametime) } // Decrease forward velocity fwd_vel *= (1.f - 4.0*frametime); + // Update forward vector based on pitch and yaw fwd_vec[0] = -sinf(render_yaw*PI/180); From a495c291e59545c0cc13377913e1717d562d6be0 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Thu, 29 Nov 2012 23:28:14 -0800 Subject: [PATCH 023/136] More stuff for head roll, improved head, stable dt in all field calcs --- SerialInterface.cpp | 11 ++-- SerialInterface.h | 11 ++-- cloud.cpp | 16 ++--- field.cpp | 11 ++-- field.h | 2 +- head.cpp | 57 ++++++++++++------ head.h | 1 + .../UserInterfaceState.xcuserstate | Bin 104737 -> 104561 bytes main.cpp | 51 +++++++++------- 9 files changed, 99 insertions(+), 61 deletions(-) diff --git a/SerialInterface.cpp b/SerialInterface.cpp index 1ac8225951..a7be21761a 100644 --- a/SerialInterface.cpp +++ b/SerialInterface.cpp @@ -56,14 +56,14 @@ int init_port(int baud) return 0; // Success! 
} -int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_channels) +int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_channels, int * samples_averaged, int * LED_state) { // Channels: // 0, 1 = Head Pitch and Yaw // 2,3,4 = Head X,Y,Z Acceleration // int samples_read = 0; - const float AVG_RATE[] = {0.001, 0.001, 0.001, 0.001, 0.001}; + const float AVG_RATE[] = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001}; char bufchar[1]; while (read(serial_fd, bufchar, 1) > 0) { @@ -76,12 +76,15 @@ int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_chan // At end - Extract value from string to variables if (serial_buffer[0] != 'p') { - sscanf(serial_buffer, "%d %d %d %d %d", /* Needs to match Num Channels */ + sscanf(serial_buffer, "%d %d %d %d %d %d %d %d", /* Needs to match Num Channels */ &adc_channels[0], &adc_channels[1], &adc_channels[2], &adc_channels[3], - &adc_channels[4] + &adc_channels[4], + &adc_channels[5], + samples_averaged, + LED_state ); for (int i = 0; i < NUM_CHANNELS; i++) { diff --git a/SerialInterface.h b/SerialInterface.h index cc2d76600e..fc1abf180d 100644 --- a/SerialInterface.h +++ b/SerialInterface.h @@ -7,18 +7,19 @@ #define interface_SerialInterface_h int init_port (int baud); -int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_channels); +int read_sensors(int first_measurement, float * avg_adc_channels, int * adc_channels, int * samples_averaged, int * LED_state); -#define NUM_CHANNELS 5 +#define NUM_CHANNELS 6 #define SERIAL_PORT_NAME "/dev/tty.usbmodem641" // Acceleration sensors, in screen/world coord system (X = left/right, Y = Up/Down, Z = fwd/back) -#define ACCEL_X 3 -#define ACCEL_Y 4 -#define ACCEL_Z 2 +#define ACCEL_X 4 +#define ACCEL_Y 5 +#define ACCEL_Z 3 // Gyro sensors, in coodinate system of head/airplane #define PITCH_RATE 0 #define YAW_RATE 1 +#define ROLL_RATE 2 #endif diff --git a/cloud.cpp b/cloud.cpp index 329b840ab5..155540bc01 100644 
--- a/cloud.cpp +++ b/cloud.cpp @@ -30,9 +30,9 @@ Cloud::Cloud(int num, particles[i].position.y = y; particles[i].position.z = z; - particles[i].velocity.x = 0; //randFloat() - 0.5; - particles[i].velocity.y = 0; //randFloat() - 0.5; - particles[i].velocity.z = 0; //randFloat() - 0.5; + particles[i].velocity.x = randFloat() - 0.5; + particles[i].velocity.y = randFloat() - 0.5; + particles[i].velocity.z = randFloat() - 0.5; float color_mult = 1 - COLOR_MIN; particles[i].color = glm::vec3(x*color_mult/WORLD_SIZE + COLOR_MIN, @@ -78,16 +78,16 @@ void Cloud::simulate (float deltaTime) { for (i = 0; i < count; ++i) { // Update position - //particles[i].position += particles[i].velocity*deltaTime; - particles[i].position += particles[i].velocity; + particles[i].position += particles[i].velocity*deltaTime; + //particles[i].position += particles[i].velocity; // Decay Velocity (Drag) - const float CONSTANT_DAMPING = 1.0; + const float CONSTANT_DAMPING = 0.5; particles[i].velocity *= (1.f - CONSTANT_DAMPING*deltaTime); // Interact with Field - const float FIELD_COUPLE = 0.0000001; - field_interact(&particles[i].position, &particles[i].velocity, &particles[i].color, FIELD_COUPLE); + const float FIELD_COUPLE = 0.005; //0.0000001; + field_interact(deltaTime, &particles[i].position, &particles[i].velocity, &particles[i].color, FIELD_COUPLE); // Bounce or Wrap if (wrapBounds) { diff --git a/field.cpp b/field.cpp index 011fb6515f..f450c38525 100644 --- a/field.cpp +++ b/field.cpp @@ -62,16 +62,16 @@ void field_add(float* add, float *pos) } } -void field_interact(glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling) { +void field_interact(float dt, glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling) { int index = (int)(pos->x/WORLD_SIZE*10.0) + (int)(pos->y/WORLD_SIZE*10.0)*10 + (int)(pos->z/WORLD_SIZE*10.0)*100; if ((index >= 0) && (index < FIELD_ELEMENTS)) { // Add velocity to particle from field - *vel += field[index].val; + *vel += 
field[index].val*dt; // Add back to field from particle velocity - glm::vec3 temp = *vel; + glm::vec3 temp = *vel*dt; temp *= coupling; field[index].val += temp; @@ -127,7 +127,8 @@ void field_simulate(float dt) { field[i].val += add; } else { - field[i].val *= 0.999; + const float CONSTANT_DAMPING = 0.5; + field[i].val *= (1.f - CONSTANT_DAMPING*dt); //field[i].val.x += (randFloat() - 0.5)*0.01*FIELD_SCALE; //field[i].val.y += (randFloat() - 0.5)*0.01*FIELD_SCALE; //field[i].val.z += (randFloat() - 0.5)*0.01*FIELD_SCALE; @@ -141,7 +142,7 @@ void field_render() { int i; float fx, fy, fz; - float scale_view = 1000.0; + float scale_view = 0.1; glDisable(GL_LIGHTING); glColor3f(0, 1, 0); diff --git a/field.h b/field.h index 33004554d2..354a4a8149 100644 --- a/field.h +++ b/field.h @@ -35,7 +35,7 @@ void field_init(); int field_value(float *ret, float *pos); void field_render(); void field_add(float* add, float *loc); -void field_interact(glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling); +void field_interact(float dt, glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * color, float coupling); void field_simulate(float dt); glm::vec3 hsv2rgb(glm::vec3 in); #endif diff --git a/head.cpp b/head.cpp index d2b2a0ed02..46dd25028b 100644 --- a/head.cpp +++ b/head.cpp @@ -29,7 +29,7 @@ const float DECAY = 0.1; Head::Head() { PupilSize = 0.10; - interPupilDistance = 0.5; + interPupilDistance = 0.6; interBrowDistance = 0.75; NominalPupilSize = 0.10; EyebrowPitch[0] = EyebrowPitch[1] = BrowPitchAngle[0]; @@ -40,10 +40,11 @@ Head::Head() MouthWidth = 1.0; MouthHeight = 0.2; EyeballPitch[0] = EyeballPitch[1] = 0; + EyeballScaleX = 1.2; EyeballScaleY = 1.5; EyeballScaleZ = 1.0; EyeballYaw[0] = EyeballYaw[1] = 0; PitchTarget = YawTarget = 0; NoiseEnvelope = 1.0; - PupilConverge = 2.1; + PupilConverge = 5.0; leanForward = 0.0; leanSideways = 0.0; setNoise(0); @@ -52,32 +53,36 @@ Head::Head() void Head::reset() { position = glm::vec3(0,0,0); - Pitch = 0; - Yaw = 0; + Pitch 
= Yaw = Roll = 0; leanForward = leanSideways = 0; } void Head::UpdatePos(float frametime, int * adc_channels, float * avg_adc_channels, int head_mirror, glm::vec3 * gravity) // Using serial data, update avatar/render position and angles { - float measured_pitch_rate = adc_channels[0] - avg_adc_channels[0]; - float measured_yaw_rate = adc_channels[1] - avg_adc_channels[1]; - float measured_lateral_accel = adc_channels[3] - avg_adc_channels[3]; - float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; + float measured_pitch_rate = adc_channels[PITCH_RATE] - avg_adc_channels[PITCH_RATE]; + float measured_yaw_rate = adc_channels[YAW_RATE] - avg_adc_channels[YAW_RATE]; + float measured_lateral_accel = adc_channels[ACCEL_X] - avg_adc_channels[ACCEL_X]; + float measured_fwd_accel = avg_adc_channels[ACCEL_Z] - adc_channels[ACCEL_Z]; + float measured_roll_rate = adc_channels[ROLL_RATE] - avg_adc_channels[ROLL_RATE]; // Update avatar head position based on measured gyro rates const float HEAD_ROTATION_SCALE = 0.20; + const float HEAD_ROLL_SCALE = 0.50; const float HEAD_LEAN_SCALE = 0.02; if (head_mirror) { addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + addRoll(measured_roll_rate * HEAD_ROLL_SCALE * frametime); addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); } else { addYaw(measured_yaw_rate * -HEAD_ROTATION_SCALE * frametime); addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + addRoll(measured_roll_rate * HEAD_ROLL_SCALE * frametime); addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); } + // Try to measure absolute roll from sensors const float MIN_ROLL = 3.0; glm::vec3 v1(gravity->x, gravity->y, 0); @@ -112,7 +117,7 @@ void Head::simulate(float deltaTime) // Move toward new target Pitch += (PitchTarget - Pitch)*22*deltaTime; // (1.f - 
DECAY*deltaTime)*Pitch + ; Yaw += (YawTarget - Yaw)*22*deltaTime; // (1.f - DECAY*deltaTime); - //Roll *= (1.f - DECAY*deltaTime); + Roll *= (1.f - DECAY*deltaTime); } leanForward *= (1.f - DECAY*30.f*deltaTime); @@ -166,16 +171,18 @@ void Head::render() glLoadIdentity(); glTranslatef(0.f, 0.f, -7.f); glTranslatef(leanSideways, 0.f, leanForward); + glRotatef(Yaw/2.0, 0, 1, 0); glRotatef(Pitch/2.0, 1, 0, 0); glRotatef(Roll/2.0, 0, 0, 1); + // Overall scale of head - glScalef(2.0, 2.0, 2.0); + glScalef(1.5, 2.0, 2.0); glColor3fv(skinColor); // Head - glutSolidSphere(1, 15, 15); + glutSolidSphere(1, 30, 30); // Ears glPushMatrix(); @@ -184,7 +191,7 @@ void Head::render() { glPushMatrix(); glScalef(0.5, 0.75, 1.0); - glutSolidSphere(0.5, 15, 15); + glutSolidSphere(0.5, 30, 30); glPopMatrix(); glTranslatef(-2, 0, 0); } @@ -221,29 +228,45 @@ void Head::render() glTranslatef(0, 1.0, 0); + + glTranslatef(-interPupilDistance/2.0,-0.68,0.7); // Right Eye - glTranslatef(-0.25,-0.5,0.7); + glRotatef(-10, 1, 0, 0); glColor3fv(eyeColor); - glutSolidSphere(0.25, 15, 15); + glPushMatrix(); + { + glTranslatef(interPupilDistance/10.0, 0, 0.05); + glRotatef(20, 0, 0, 1); + glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); + glutSolidSphere(0.25, 30, 30); + } + glPopMatrix(); // Right Pupil glPushMatrix(); glRotatef(EyeballPitch[1], 1, 0, 0); glRotatef(EyeballYaw[1] + PupilConverge, 0, 1, 0); glTranslatef(0,0,.25); glColor3f(0,0,0); - glutSolidSphere(PupilSize, 10, 10); + glutSolidSphere(PupilSize, 15, 15); glPopMatrix(); // Left Eye glColor3fv(eyeColor); glTranslatef(interPupilDistance, 0, 0); - glutSolidSphere(0.25f, 15, 15); + glPushMatrix(); + { + glTranslatef(-interPupilDistance/10.0, 0, .05); + glRotatef(-20, 0, 0, 1); + glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); + glutSolidSphere(0.25, 30, 30); + } + glPopMatrix(); // Left Pupil glPushMatrix(); glRotatef(EyeballPitch[0], 1, 0, 0); glRotatef(EyeballYaw[0] - PupilConverge, 0, 1, 0); glTranslatef(0,0,.25); 
glColor3f(0,0,0); - glutSolidSphere(PupilSize, 10, 10); + glutSolidSphere(PupilSize, 15, 15); glPopMatrix(); diff --git a/head.h b/head.h index 5875114e77..b0182cd498 100644 --- a/head.h +++ b/head.h @@ -27,6 +27,7 @@ class Head { float EyeballYaw[2]; float EyebrowPitch[2]; float EyebrowRoll[2]; + float EyeballScaleX, EyeballScaleY, EyeballScaleZ; float interPupilDistance; float interBrowDistance; float NominalPupilSize; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index e02456d7171ff4467da391dddf0a8a01ec236aa8..5069cead6418bad2f51d22121b689a2636dda6fb 100644 GIT binary patch delta 33081 zcmaHx2UJv7_x69I+-pfhm|GP=5CnCGUKQ-U7wmLIKxPC*>}BTIdqW)yU{GU8j4?6M z_?l*7Ow|+v!Ao~KIfi$huTNwpN`5K^~O(` zPj70~(el6}y&q}j>R}yX9cmqB9d6CAW?HkX+13)P!|Jq7wa&H9v(C4!u&%VOvaYsn zvwE!CtvjsuTlZTJSdUpBvp#M;Zhg*r*7~ybob?Us+tzojA6h@Qerf&6`mOZ`>yOr7 zthcRyC@7L5D@~LZimzf-48>GhDQ%TtB~0n4bW&_CWw0__8KI0;#wZh%iAuJTqfAl? 
zlwxIyGF2&6%9R<)Y-NtJKv}2=WwEkcS)r^^)+!s7O^Qd^uIy53l|9OS<%n`jIiWnM zJg1yhUQ=FI-cjCHK2bhbE-TlR>&h?6ZRKxOQk$y2s#P`BKs7{duiDgT)s?8Gsy)@d z>R@%4I$9m0W~g~;zFMd{)iQOaI#+e6%hcuS3U!0JQQf3&Rx8zNwMN~o-lrZ=A5kAw zkExHTkE_Sk6Y6R8IrVw{h+yi)PB;gYd5r?wO_PfwVT?XdLzBLevhu`s@_5G zsCUwBx?PXdqx5J!M(?Hf*8Avv^?rJPeSkhtAEFP}^Ynaul3t)s)(iC_y;z^3JN0S$ zbbW?CN1v;2&^PLv^v!yuzD3`vZ__>cc72ClrQfIT)eq^AN*YK=X{LF16|uyNG*k8#R)(s;@^ZCo(kHr_EV8t)qK8Sfh(7#|v6 z8DAUMj314kjhn_TB3z<^l7ddDwi!eB3-) zxvND_%S)A)TI}#E5>knf%7nCDNE?K7;@EQEi58zsf;w^EN)z2Rrb9s)XRk0^yS4n>6{Dg$0M0;>-Onmaf3G)+@ z<6@3|{p}u$T-;~g8#zQ)7jH_Ko9Q=xevCc1b6m>8@$-{YlI_9A_FS7JwJKiU zIN@Vk=KQ$WxEOnIY;s)O!U^-^5@Hf7gRgHfi&s2xeT6Gyeq4N`^W!>q_6|!)imiP1_pg$zX~O3n;d4>=_z9nPLwg=C zo)_9)ey}b~Qfxw97_XX`g%@Z>bM8U6_OkY`eB_#`S_fDM5@{VQeBKv6AJj2?Soznj z_Kinc$5zH(H&yF6>v$rq6NS$u;q!5w!zY!aZbvlEwNCaj^t2XQi-@#N5k8*@pU>+Y zzVK?8bK?CEuVv1UjZg8$PDx4hrYSx-(H{Iy9c9)EFN?2rrgavPEe{KyuY}LnmA7w4 zG+t<3TpwVGbt#e7<-+H(@cFhbz!h(Rt*(M;{+aV*J9kdD2gi0!@K!S}C8qQL23Tv| zR7(_vaB>lJlCW0=pP}CcUkZA&e+#lYu!^Xn%%tq)s|5@~%@_}maaKi84`QaSj~@F7oFpK_<|v!1m6$9l^8r0}^Ze0~=` zw}sE2`>an}pRqn`JuQ6x7Lp{SdxYfgtwSz(t{lu*7}8zEL;67VdgjPoLcK;AvJoND>UPp^@{bX z^_um})*+U_fW|`d@#@YB?Rh+NN>WtQb#GCE!}^o;`ql?5ZKb`|8`ht<9<=oIziIv5 zOa7aXouh@;X-Qe<(8UxdRiQMe~mLs~g|ao%O%z@bye@WKsMBlt3?G8zEV1 zl^`K0UczvtotH3FNNNLNd+RkN!b_;v5o+$N%4hx@;Z5G~p^-|OJMD-Pr9>++N~{v6 z#48C(qLQQ}D=A86rHhg(BvVMOh15n!K|%@^Qad4q2`OAi9fZ_LNOmDb2`NTMaYsC_ zHMX?wrVJ8Nf{>Dh)J;gegw#(+gM>6pNTY=`PDta0lqsZaA>|0E;BE&gL+eT`wep0> z7R8h3V~J`ysvZichZcFEarIDOJ@ld%%B+Xl)Y?`a&{Z!qtsaW-LOq+#tiS45@32s|L^YjP583LWvtGzm4@K5PEt^=Pnl7z}qU)jY zUT9T46k89~dZG38P<%afrHLiVzf#%it?3paCDtn2gp}m1X|S@>opayaE?26R-Cm~} zA*IwR_X(-9*Qu?t&s+HWh1BI8t|$kTgWguBpyh7mLFKSA%5$WtrJb58q^?5pt{CNE z<*4$AWw`RF=gX#+j&W&za$Vyqk13C53`-waS}=Leh`fs8J<4&){t483J)xZRbZ=$} zX#5}Llqaj1CDrw`@@#{u(?aU*-3adCYvqhr*9$`G@o!x(D~FYFZkh_IXPvCTf66*9 zq%^-K4U>LTxzLcAw}sUE4*k1c`uBuO>ho{@kCekoW*vXuI{vo*vS;yc00?pJ`%EQVdYJl1%$FJo-x}Z;9MV^q-gfz;p(OWcAgVeTa@VH^= 
zQ3Z8|^j=K@scQ_iof?`oEPd$AioAU1Wbd%OYCAQ|^T<6G#TB7;yqg2HlaR*T(HQB~ z7$u~!|JLZu!L)zmAfjQ$$!h1j6Ruk-4W_PY_l7t1)#-};r>;`3uI3H8rmHjV##JkXH2F?`XM5@A z2&wSj^xkQ&cZYk|VEw$~8`y-p#4B~Nkcw;7r9ztGIo`t3E?||q)}6LTU9GMWl0!(& zJ?c7jy^y8~snm0$g(biJR&|FvZLhjb^{Cs0G)+j;g;c&*-Kp+UtAsQ|NNa_(&NI)~ zQr3TuJ9VGBSKX)HukII8g^=b8$t9#E4I&Py4|&NRP#;te3u&g1W(jHbUiFCjuzFNT zbA&WkNb@`~EiE;!lj>9dm+RB&GwQQKS|Fr_LRu8s!;iXuqq+o7$eUSFHZ`vzFS#M! zS@o6wi{w@HHT9g3+(HsUS{&N_$9j@U1h;QQaNUY%qPA7v5>on!(SDYaMrrS;AGy=^ zs}HE}s_&_X)eqDU>*OyL(lQ~<7SeJdt=O+#vZ(4O>Zcafq6%rHJ6lMrgtW$U%W8SZ z^)1cRE9zD6Dyx_>b+Y%6x2+_ob(D}+*Dtm2)bGa*OYf6cF?o;r9c4Lw{-etZ^5zET zRRqVzc8QA*9x>GK%7iJU6_d*g^Ctbb8BVJ|yHjgv>2BOG>EATmX$REb)m!RqrHlHf z`j`5*7$a>I(q#ugWsSLW*wX);`|7KeIdTf`MVpKpUEz+H`N3&^mAyo^hW{(!7MGI-S zknZ!G*DW0blC;iVhh!~9NPC2|caPRZOBK>SA>Ci+5YkKQ>vibubvRJx&`;|xq=P~_ zG-mdcf{J3#aKq9jV5m0YUv49{Q9^o9NQdhjLdIzmy$<8OJV)wyGPFz~JuIZ7|KJ&w zuTB1!o8|zvuw8x5Rbsz3j>t4;NmHlhjG;O+&9v9Ma&n?5!I(?Qq z^#N_RHmAjNExyv`YYVi6+9J)RxrOwEkp3eY*NVp921v5s`gO5@r4HCBX!1k zpZKZTWA(-t8;nnQPPek8X4E}GYNxa_?zDs2liE|-)7mrIv)XCxIqi8Ny)2|xg!HPA zUK7$eA)Ob}>q2_tAd%Wx?L{JKsXb4a_L}$UNqSRAZwVhicVi*F;r0{Kg%j@vSb{9B zBztgt;zE1y{MfjZ7<+JXLj1yo|J{_e_uM(P-Z*dEDXp%3q> z`$GFtNN)@29U)y5(z`->Pe|_z=>s8sc#v?yw9DGJ+7<08VcK`@EN|?Ogmg(rUkG`i z+s9AHqdjv1Ew8#J6i%7!EQp#kZCXRP-?iKBwA$w5MWbhGwLgUPalN$D+TZTfJvzEY zNS_GlvpWOs)qV6P?vy>v3z|+L$zX|Dg zA>9(vZ6W<3q(2W3PGs{UJxkBlbM#zymN$$yr+*3QZy{TR+~~w7A(neBt`eGAhUt#X zVd{glmp)Z5o%r7qTl(aRJ$e~s|G4(EyrtIyLF*2)|co@g)9rX ziIAHLxtWlg3;7-)-z(%6LiRmGq`pF5sjnhZU!$)jQeW?#quf%+enPg^eO83rs_sK; zj}&J4(6v+FC1n5m>XlaOHJNwC)Jd$?cT+ZISbF;8in{dcJM}&F{@#a<)B64H)LMPN zkkvcIIphT%5VF?L%@65E-6?zYBl^Qa)`e{BX}(r}R5W@^$mXa)Qwl~^%pF;Dhy01V z2DILU(nwca{IarbP#e!Pv>w;Nz8}(C3i}#{*jQwYxR$X z9FgPKa$ujnWAmIdC(r0PV|4lC$qh0-*T3*w2)DF%eWQQdFzSksJ2m)xr+@EGyYC;` zf6{;VcIkEfhW@kui~g&AQ~yoKb|FU!IZDXULXHt~tdQdl=(mW}|Iq&=l1Kv~$Jc31 z5OP=V2X8rhWP7zu@yw2}yz0{Cd*3?7#(FouxcEeSa8g|S!n-f-?ltZ)TDVhcjeCWh 
zSZnwSIqCm1HN$GC?zCD%5pr^^p$R#q;Z$>XZbmC3!0Xgn$en8qR~sRB8Jk`8@YFH14f>Z2mN==7zOUsy~bpt&?pk}U?C3^^2j>`_ZlU} z)VsTT-R5qT8f8KrBIKc-CpuZeJ=L8ohR<*zkMR87$r9rE*n2rj$myO$nkTZmwd5}n>k}$%JZHC9#ZtSR= zf0op?OnE8z%IqaonA<*EnW8W;`zBd?8N~a=~8X1mVU>Ay4*3a(h0uTMBZYap%+; z&kDKl&S2em-gwoWcEC7eykML)UNl}ZUN&A4a*>dWg*-*bB|>%x*(v0y2aMN@bH;h& zbtA-hv&9QSE){Z_kmm_`k&sh`>=KO+c=kkE4*P#(TrxhUnemD7sqvZdx$%W3C(1IW z`D`Ii7xFYA&-FYVWl2lA?9SP1d}~}Wt_rza$Q44K`4-KL?~Lz_kck5eCYM%BDV#F7 zT*xzooLaY?8b2A=Ju%Uikj6JC^L}U>bZ12CI=9YuE`P7PoaNaRZBd3BfBN=vBjh=v z@xeMC6VsC8r}P;+x=-1pnNug1RtzbdlvgpOtaOjr$a@3%zfAQbeN4%7Bia&akv$FZ9ZoPJa9ZMHQ1h%~K2o-gDDb!A@Yef05URBgQZe|+cReM_EQcecqXpIlln zxx7Qi${@dvo*!c@O`ElL=hT`3LKYrNtR*D5t=Zn4cE1^HhM4WlP%})(ON6{s$jgMh zT*xc#x2R@>*}lT;_jEQ>y+9Ws zukm~rYY83F!|Zi;9htp_yzam2$n57%-DCDQ2MBq+kT>0FntRQ`=CJ==LgsLDgpfA~ zd7~#Y&JyD38D}wkHVe7Zvpvqz)|F%C{?86!PBJGqL@X5YwmT80cq5jW4k3Glyt5%< zsX6^$5zEaPLf$Uq9TNuj>D#wpN<~?DXb)HYLvq8oIp%zKis!F5i`8eBkgGgH;w^zK zg}In!<`Q!$1?F;dg=bE@Wk_(1w?z&KdH+RomATqnW3Hu{xz1c~8D_4q3^zA=zKpjF zckLGPejy)p^Nw-1&CN=4i?_`Q7xMi=zOTN?-C6Z>`r6#-&e?13GONsLA=e6dkC6A? 
z>C=!q-Cf`J|J%jp{yOWvf7~-XRneBFL;l&&|D%=y%P{YcxzRl8#yg>dLOyUu$1!hH zEfsQ2eJdX~PvrPTcphnEY2|5~Xz{Z=;fYDK#5MkpdCIdf(GuQxkNKo$eWE3;@iXSL zo^y$oh=CqxQ4K8~fEJHHi<8je474~4EnbBd=b**w(Bf@qaS>X)4=sKKUjux@z&9Lx zyMk|bPhyhA<|+YSC-|0v?=^mOVIKaX!#nndk_ z<)`4+8vJ^LUk>=K0>5qGw;lWrf!~9k7Ri>ju2;bCHSjwRes6%^Tj2K&_`L^yAA;Y< z;CB`LZi3(6;BNu{#^5i3zc2Wk;NKejOTd2y_%8zg72v-E{O<$*L*V}?_@4s*)8Ky= z{9gnA_rd=|aQR;X|4+dGGWcHsYge!i0_za44g>25uucGL23Yrk^(a^$1?yvAJr34q zzN>UmJ#0QD_U-v;#}sPBRL1*l(v`VFYxf_fFy z??4L#Z2)MqU7+m;?ILK`LHiqY3+O(e%b+&{{T|TUg5Cl2PM}AF9t(Or=siI11$uwb z2ZBBr^bw$s0(~^-`Jm4PeHQ30&{u-K74)5;AMj#_KtBTdQP3X+{RHSIK|clh8PLyx z{t@V3fPNM9pTTGjhCdjQU?hQ&0!AtrUBMUz#&|ID!6*e|8W`nZRDdxHj2FRp1B^>x zd;%`xYcMW@aRrQPVB7%X7cg#u@jI9nFq?uI31%XgBf*>qW+s?9VCI2Y3g$F07lP>m zQ-HYy%w=G%0CN?X+rZoo=1wrHz^noDK5UqKz|%*<9fMYnL#uPp>LRpy4_aM^R=1(mpU}D$v~CBj!=UwGXgwNQkA>DVp!EW1 zy$D+GfYy7U^*(6*IkdhCt-phSC}-%;2j8f7Xm(ofUh9nduZbeZ8T_;2yF&KoAJ2)qS>e?yQ3 zf*M0mQx^m^hoE~QNQ0mt2nvRvPzY)dK}isl0zq9MC=G(TLC_`$+6h4iAm|VT9f6>u z5cDVnoq(W|5OfNHo`;}UA?Pazx(-11nu z+aB6Rcs8Y36ju(kT@7tdLfgv_Y(j7X1owpC-Vodmf(JnGUMrQo({n? 
zAb2JOFNWZy5WEV4*Ff+(2;L0ATOfEF1n-036A=6?1fPT8*CF_Aa0Oq4;P)W-5(Iw& z!Jk3!6$p_bq$Py-Lx>6?I)p?(NJj{mB1R+Nu*l%C z)nc?Sg?6i<-6PQMU1)a`Lfb%SdkBqy&=d&m;%U{*($+N+Len9%3_|BX=sXCmh0q5e z^e}|J454p9=-Uwb1BCttp|>C`48kHIEE>YfAZ!kV&4aLA5VjA(_Cwfd2zv#>UW2f6 z5cUCteFR~bAnaoZy9&+2zJsveAncasbT^CQY60zALVJH`uRwbZ+MCdR0JNV0?e{?Y zHzC{t;eim|3Bv6V9u48K5S{?xNf15=!qXvq41`aF@JtBLhHwXjPlfPu2(N(fSr9%S z!WTlg3tZvrApAZEKLp{&A^Zsle+t5%f$-B1eip)Cg78-$`~rmk0O7wu_$>(k6T<(7 z2tSChLWBwtIz*TdQ3MgwAYvgzxFBLVM685})ex~3A}S$bD=s2D5U~RyY9Zn*M4X3+ zD-dx5B7T90-yq@^bZ7z{nn4F0I+)O*HFRhL9fF`kFmz}K9ipK_EOdy64vEkq89H=^ z4yn)~7dk9}4tt@)%h2Hm=-9FibTpx3Yv>pN9mAkwICOdlIz8-}+QV|e)f#MpU<(FY zDA>Zm))8!WutkF{4s3~FO95Ld*t&tOC)oOctv}cXfo&+*Mu06HY-7PT0c@FI%K=+H z*d~Lm7;Fx(m4azU^@@CH^KHc*xtp3?E|n~ z0^6rx`vPoVgY8?eT?5+>V7m^sU%>Vo*lvUEFR)v{?gREFU~dlg7GU=Sy8?C{?5)7w z2JCIY-VW^T!QKJvHn2y5Jr?Wour zL1a8cra@#kh%A7}sSsHPk?SCG8$@o0$j87H`4mJx1Cbv>HicM!h*cmq7Gk?VY#PKCLF_b$Er-}`5W5>4t}(;~ zLtICQvq9Vdh#LuU=@3@|aSI{N1##OTZa2i$LflgjcNXGag18?c?stg04e_B6Z-@9O zh#v&;=@35#;%7qqB8YcGyz2nOABFfwA^sf1UxfJgApTc~{~Ho4kkA$qIzU1vNEiqS zqaa~4B$PwKd`MUb2{n*#01^&C!t;>u8YG;DgkK@yZ%DL2VkjiqAu$RPhe6^va3xNF z#3hip782J(;yy@x2ofKL#E&8I8%X>XlJ0>d6_RvFN`a)FkklKJG9hU)Bo#r@Mo8KL zNxLBF7$iLjNl!!4JCO7-Bz+1=w;;JOBukJSiHqbUNKS#|k&rwQk~1NBAtWz{fM)gOo@}iH4MPNXdkhY)F|0DN7(_8Kmrol!qbZ z5lDF-Qa*>2FI~{N33T>@&Q|Cg2c1))b64o>fX)@rc@}iu37z*s=lh}a3()yIbbbRm ze+Qj^h0ec0myXaS4!R^jmx<725_FjiUA94&-O!~Ly1WQo-heJ|L6={ki|a2)1yVag zY8<2{Kx#Im7D4J1NL>o4>mYRlq#lRVXCU=7q+Ws48<6@7qyZ(H5KfbKdjx|`6w2Xr3@-3LSWQs_P#y3d8~RnYx@=zajYzXaXigzguh`%UNp^k@V< z`azH3&|@U@sDd8%LyrT{<1+O433}Xsp53A60O&agdaj0^mC$o5^!&mFJ+DE}@1a*0 z=+zr~^@U!Q(5ni1)j+Rrq1Scj^)vMD1ij;-cOvwzfZhwCw+niohTgA0@7JJD6X@dy zeXP)DEcD5NK6%jR5cGKz`aA}Gu0x;O(8u*B^z90L`$69U(03X1T@QUXLf`Yy_g(1w zKJ*KKeqqor9QtKLzar>21^U%OzXzb-Vd(c2^!pzA{RsVIpnqrRp9=kFLjOh3-wplG zLjTvH|C=zt3a$YGFdz^HjE4bvFkli4cmxLg2L?O|0~^7>dthJ-7?=(NGhtvh3_J`2 zABTY_VBifH_y-L93kD^_pdK)&7Yv#SgBHOcHw-!jgU-O9voN?B47S2x6&Hhtz~C`3 
zcpMDg34`~-;QL|lS1|Z{82lp)iGd-VVMr^T^A28L@eJP3vd!|);) zJ`IMK!|*3z_zN)nMHt}+BU-_T02q-2BZ^@}35+-iBc6j1XJDiRBYk0{AB;?gk(n?u z8%7?1ktbl}Nf_m_z^LXh>RuQ%5=KpgQJFC6Q5ba!Mm+`TjUoMBNcV;GVURu!(kDRr zdPw&``VL6{4AQSa`ZXBc6-M`i(F0(#8%D2!(Q9Dz1sMGijQ$wLw1zRE;2P5&#+1OA z88BuhjCldZoQE-Qz}QF_n*?K1VC*IsyA#G%!Prk=>}42x1;)k0xHK5o4aNx=w;IN+ zg>kRLxc6Y(2Qa=BjBf|y!(jX*81IDfr7->hT#P>k;~$6dzrgsvU;;294JP!33H@Qh za+t6ICTxNU7hu9iFyUjE*a0TS!o+x(=z@tWVd83-_$o|%8zx?a3@c=`g^Uo$Xa^ag zkP!nJagfm!GP=7UqbFqahK#qA>%4!d(Jr_!Tm4L&l$w31qsOL8cCwCS(Re zW?RS%fy{8o>;RcI$V`CD5s*0nGBY4E8#41Cvm7#KLgsA9oClc;A@gO(d?;-0)$ZiVR{*bLeb|hpcL3RpckB00l$j*W6<&eDrvNu8Y z5y(CP*(V|UZOFa^*`Gj8AmoHYP6x=D0y*W7Qvo^0T#)l5I??cY#kn<(vd<8kz zA?Ihv`2})*h1|xFD?zRw*e#m_ga*sgnBar(v8wAol{~egwJS zLhe<_{Sk7nL+;Oz`#a>`hTOj(uLx!Lf&(b_X6a-26?YT-dm9O4&>c}d?3FK>1*U7=tB6fA>+6;QAT3Lb=l zhoImH6r6&Br=Z|zD0l`6UW0=3Q1Ct!d;|rbK*8rw@D&tXh62|ODEJi$eusiTpx|$q z+z2Me!Q?EMycQ;(g2`8*un82ZP^d#;D=2IOg>9j*9TX-(VRtC(35ETja3B;8fx-z; zm;r^kP&f$+3!%^fg;Sw$8n_A6vg zTcKz>6zzthGf?y?6kUO$8&LEM6#WK8x1qQx6gP)r1BzQiaUc{2Lvbh+heL5kC{BXn z6ev!G;;vBK9g2HFaUUoyfMPckAAsU>P<#WXcwfg)34tl0FeMD8M8cG4D0vJ@PC&_j zpyVkic@|2Zhmx~U@-mdX1|_dU$y-qJ4wSqHB_Be`$58Salza&#-$2O~DESUbeuR=6 zQ1UC3{0=35K*`_Wa5Vyl1dgWQxCb1*;P3~B3JwDtt-%opj$m+vf+HLp9l>D-M>II% zz>x@!6mX=1qZ>GSf};;O`h#N-IEI2_1US;cF%}#Xz>x`#9B?`E!7&*e#o%y&qZAy| z!BGK@+2EK5j)mZGgJTIemV;vzIM#w=12{H=V=FkegJTysYQRwoj(y-b0FDR1@enwU zg5wxCj)UVQIGzN@Gq`X(2aXrO@e(**1;=@Cya|rC!SOCQJ^;rhaC{1mFTn9NIKBnP zHE{d@j_cs~1suPD<2E?{0;dI>KHzKu&gS530Zu<~D&W+?*$SL(z}eOX&UWBz56%wY zw1G1UoU!0c0B15dyMVJRID3GzH#qx&b09c}fO9xFM}cz;ILCuC1Dx65%mZfuIE%np z0?w)6oCeMr;G6}{x!_y?PM24}VsI`4=Spy{0q1&fZUW~PaC*SG6P(rHybqjv!MPut zhroFloDYNZQE)yE&L_Zm3Y<@a^E5cmfb&Igz5>p3;Cus|7r=QDobQA4BXBuC0q5u7 z{0f|x!Fd&&--GifaQ+O=n=loa+6bnG!PH2Y8VytX!_*Nlbrei>!qk~Cbv8_`hN=5u z>Oq)#7N))qQ{RNC-@(*hVd`&CszPZXl(xl1X@4jk0i~m$bOw|zfYL=!S_P%|L+Jr1 zeG^JQfYOhktSyvvfU-_dHWtcqpezr{wn5o$D656CGf;L8%3g=Eo8bM^=xL2$S~yIL zhH0@dEf=OualtePOxq08cEPl2n05lDJqy#GgK3|^v@0;}8ce?jrmHYrhv^A0y(>)b 
z4%16u`V5#p6Q=Kg>3d-MKA8R@On(EWzXj88L%9!>%TOK)<#s5Kf^yejC?5^wW1)OD zl)IsPF_iCs@;y+#56Yi~@|U6fRVcp!<$pl=UoayaW<oE4!mREvs~BcYhgmaV)*6_#1!ir7Sr5Ui<1p(9 znDrLS`VeMag4q(x_J!GgFuNnnj)U0=FnbKl&W72!FncM?UI()`xM22Cn0*pvpMu%% z!0eA<_NOrW511ptoF*{G26GZ%P7=%+1#>cBP8Q5r1anrvoK-OA5t#EInDZpe`2^-% zhB;SYZgZHcz+4UH_J+AbVD2!OTMlzw^I`5nm|G2V_ru(SF!u$RdmiS#0rM;{uQ|-S z7v{ymyi}Oi73P(~yxA~sF3j5v^A5ti2VmYgn0FE8y$ACxFuys>zZd2w!TjzpzbDMk zg!z+Uei69luZQ^_n7;$&KL_()h56@T{%^3r0t*_$f>2mshXqlvU_30yg9Vdd!5Ubw z1r}_B1*c%a8CY-@7F>e`zrccn0uYxc@1cPV?A|HeU#8eQcK%4<_78YA!aR4k1gvAb6Tmg$`!Q#`f_!U^} zdJUE|f+hFBk`}OJ2rL-`OUA*H9k65%EZGN3K7l2dVaXL(8U;&}VQFVrDq!hqSh^OL zz5+`xz|wbMnFh;(U|BFM%YtQvu&fxC?T2L#!?H(U*$r?l`vaE!1jE5C9u;Ku$I0P%Mz=|8N;ulyM11mej z%2Zgn3RZ50m0Mut2e9%BSosw$R@q=x0<21cRZC&jI#{&2WiST_mQJpk*D!Mev`-Jh^t zhV@Nh{Rmh;0oG^0`c1HYC#N9w_hHlLu<1+K z+z~d%!R7?myc#xF!se~8`9s+JCAc= z2(|>nmhrG954KE#Ef2z$$6(8G*m51V+=eZG!q#4}buerl3R|mT>wegJ5VlFM%@?-$ z!L}*5*j5hPDq!0Q*!C=Jdk#GJfJX(74xS0%$p=pXcn*N)D0m(P&rjgF1)e`(dn#=2 z1Kaz-_QkM$4QyWr+t0!Fi?IDY*rCFXK-ke1c4WhjBG}=*|L=GZc02|Ar1XEWG& z59}NVJ9A-YKJ45JI}gLoBe3%d?7RUxe}P>(>-3C{+ z4^;a@wF1>9R0lwH5LAakbvRUagz7k`9tPFppn3vSXF_!jR8NEI8BjeFs^>uUe5hUu z)ytv!WvG4&sy~71&!GA;R9}Va@1go9sQwMAZ{wo+FQ~CVO;f082{j2&GXiQRLd_(o znG7|>P~(7_IZ!hXYF0qaYN%NUH5;L(5^AgCN<}lP8ftsUG^C;9j1~nf* z&9AUK5O(*5-IH9fdnN4N1iLGt)`Z#ss11VJ5U34<|5wy~hIc_;?*soFWM%*rB+M`q zLc+*Ety^o=YSlVxm!ej!wQg;-w)N|1ZC!PbYSrc&WbY+QR74O5NWzl6AtV8kKqi6z z#r1pff8IRLi*ujnes|7!?)$o);=vT>Qap^}e2R-GK8@ltC_bCw^CV*7Pp5bm#UD{TpW=@x{*>a+DPBtP zauUVgP`r}jQi|75yq@BX6mOw;JH@*w-b?X*imNC-L~$*}brd&Je3atjBwLYeL$W=| zRFWM@{(xjxk{Kj>lI%mWAIX6vvq=t-kQ_>K1j#~@r;_|3$+JkFOY#DeKPGtz$;(Jy zLGmh+*OL4>$zPKE4ard?ZzXv<$va8@f#kg;A0YWK$wx>&M)GeYpCI`($!ADDn;`iD z$(KmJO7aboZ4%+#U#HV`6bD(Nvqu@OxtZiPk~>N6A(<#ASxNFB$r_SJNH&mcCV7mKprkb=?I=m1B%PAZlysq_ z8zns`=}k#rN(NApMM(}Nd6W#Nq=1rqsk~x%oN6Gh;Y-DsWI)%|`jLv0r5u;CK^wo_1 z6{CN{=tmj-B%_~Z^h8F_k}!G>qt`Kd8>4sdAwKNDhpBvcDj%N9hv)O*(|q^>AO4#U z=kwv`eE0<)HZkTT#tuQC1&#!qMbe8w+i{7%MKGX4M)Ixyj6CUj*&ArsDG!Z}R%H4|=Q!W~Tb 
z2Z;&)X2Q!%n8JjQm@tnC8qaoQdt3n8L(DCZ5H_bC@`aiFY#bE+#(5#MhYk z1`|JF;+IVPiiz8pSkA->CiP^}Kqh4|=@KSg#iVN_OnR6}e`8XDNpCZ$m`No}`i4ns zn6!>b2bt8sq$Vc+fXO|X+?&a#G5I_uU%=$wGxlW89?t(a-0Oxwt`%}hUu>1j;w$n*lHpUL#InSMLd?`8V^On-;z zB~1U2>7O!vIn%#pdJWT0)c@%%%*bZO2xb&8<2TH>of*Gl#y^?yGBaLf#zJO%!Hi|h zILM3!W;8LgFEewPnaj+pnE6X){+gM8W9Bo={1-EqG4nfSmNM%H%<9Rk-psmK!mOV$ z>uP2_#H_zE>v3lNk6GiHHIZ3MnYEHxtC&^6ti#NzV|E{AXEQs8*_Sf=8fIU|?7uSm zAI$zIvnMcnCbMTVdp)zaGkYgwQx(Fi{J3m^TRkDlS9rF^uKk5)0aFLQI4o6FokF*m{7Cz-pFx$Bv`fq7ZX8_v9Z z=H0=(#C^nAgJmw#;wO{0o_XIrFb%{;SM?m-+88e>d|FF#iw>hO^*w z7M#I?KeON|7W|zBpRwR87JS3P6c%=2;VCS$Q5}mK z_~cwZxr9$H<&&5A`FeniqHPVXRq+tYkanr&$jZ}b{6+%aW0F8viNQm|B=Oyuy}HU#dBCZ zm(PRGQ}{fM&#&Y28~J<`pD*O|FZg^JOFFV7gC#v!atllDV#(bsnZ=TiS@H=>jYn`Q5?Y(L9tS$2dk&*969`SK@x`66Gw$(L{Q3NX zKSyHuYb<|*oI&im9MAsP4G<$-=y))@A>9IzIm8$zU7;>e6wD{x4rmw5Z`9=?R|XvDBnKD zw;%KEQoj9?6{)P~%8G8RxSka^vEpV{e87rvteC)xJ*+s$ifUFCu<}e+p3TZfS@|R@ zpJwG^R({ROZ~3kh-<{BZzU#?%xAEOQe0Lw;E#SK)e7BTU?O4^BRVTCRN><&#s$a6| zF;+dzs(-NRU#xnbRj;$^O;*id)hDd_j8%(S^*O7)V^t}o=TLe+r593qF{O!1DZQN1 zpHX@ZrPovX3rc@Y=}na0Lg{Uk{*KbSD7}Z$`zd{h(mzr9XG;HiA`zueQTh)`|3&HZ zl>VF2S15g*(zhsmhtl^b{U4&|1{1+2S_b(gd58P>hVx;I$&HtXJDT?y+xWZg#A?PJ}3)>X0YAnQ+O{f}9HG3zg7 z{ZCo{8tebZ`lN*QAF_Td>rVt9W&JU}557N%@6-6cBi~=c_oMj!7QVlY@9*IIDSW?x z??2}IPx*c^8&cSi!G<1e=*@;qHVkIN5H{S)hQF}kZ)|vi4NtM*`2-tYWW#DUY-htx zHtb=;J~mXdp_YwBY&@ThKVsuWY`lbxSF!OLHa^S7H`(|$8{cK)du$xT#&K*sz{X}a zwy+7CTCpjWP3dg|+ke^iKHJu@Z5P}2u&tbJ6>K}q zwiEN;_N&-_Bil!@{Z_W$#`b&IejnS9vZDh#QrXdw9i7?HgB`utaV4t0TKQv+F8$-N>#{?7Ed*x3TLUcHPIW57{+?U9;Hr z5xeHGYcac)kl5Xo-Tm1;kloqr&S7@}yNlTUM|MBO?!UA9pX`2?-LJCyb@mjo=Y013 zh&>mv=Mwf@#hz=}^BjBLV$Xlr^I!J7&z`aD8PA^W>^Z=mL+q)Qu;&PSj-dq=Z(411@tcLsZBvv)3g7qE8;dzZ2IEB1cN-tX93!M>B( z*N=TaVc*s4yNP|H*mo!U{+M9jBkX&EeNVIR8TLKPz8Bc{2K(M--@EL4pMAybTgbkh z?5n1{2jv4OA4+)<<>yoWBg%hD`IVGkP5E_{-$41zl>e6UJ1GA><#$v5N6HgVQT`(3 z?@<0<%0Hxh9OV-!pF;U`%I8!5G3B39{yF7KDPKwXCd&6vUQ78A%8#)>*x#D{?bx5f z{x0nA#{M4c@6G@ElZyUSTW^2Ga3 
z<;PThO6BKNenaI-Dod$cL*;rZw^O;F%6cjrscJ=48>-qQ<_5r|M3s{y^2eR6Rn~6I8uO)k{>pP1U

PhWs(DncrmC3(9XL?HfipR92?ws@z|T1_iUYTD;C2q&$$>ve zIPfqB9^t@a9QYdtp5VZ{99YbO?>NxF!QfyD2QxU>gM$M&n8m>y4(4%iI0sMX;F%me zhlA&H@Inq=&cW+Bcq<2Qo_ zXd8zrIaEjWiTSU(4b>f~{sGlpsm`FfC)ER~&Zc?@)kCQsLG@)+KX3v_^;oK>Q9YmP zB~-7Xx{T^gRBxqv2i3c&-bZyc)rYCBr@D#i7HZm(sOdsYA8K-`8A{Em)clZ|v#2?j znhU78jG8N`xr&-=srflIqo}!)ng^(PoSG-7d6t?NsCkK+SE+e}n)j(Gre-uXW2u=y z%}i=Op=P;+nzhudr)E1fyQtYq&30^i z+R@aGrFH_fGpSuj?NVyXs9i(t7HYRsyNlYr)b6LYmfAXM8>u}??Qssb=WtgJXL2}? z!^1dy8ix~SaQJKvpU2@JarmbkzLLXNbND(A-@xGqIs6|EkLB=64zK6%9u8M?_!viU zqytCNIMRtDCv)T!j%0GAKSu^}WH3i^IdTz4?&ip297&QmGLa*5IkK1|D><@?BO5rf znIqdcvXdiwIC799H5@s@kp_-5Q`efh&eZjxt}k^t)a6k(oVo()MpAbUb>~xeA$1p1 zcPVw(Q1@%i$OE6VyFT-80lZOWmu~y+PgE)V)jH`_zr4ZW?v-s9Q?i zm(;DIZZ&o5sM|o@X6p7(S593ebqA@dp{|+wcGP#Kz7O>${D1X1)aOw@ocaRlM^b+d z_2*N6A@vtie<}6XQ2%S{@1*`N>K~&1Pt^aJ`oB_tBGdUC#MEzFkcTm5Z`hC<_Q{O^ETN=91a0(5XH1wxo5DkNA z$fco(hSO*`gNCzdIFE*(&~QBsH`8zr4foOTC=Gw1;c*(CB+>A98eXK~Wg1?i;Y}L; zLqn2=sWi-^VJQt?(y)q#)ikW5VFL}DY1l(UISrLG9HgO!hGrVu)7XW^el!lCF^|UK zG#1b}lE%|%JYPcNg*0AF2;dk zqUnD$mC!VXrtvhLGKr?yG%cd(OPbcuwC=6#k4KktDrq`GQv*%SG##TkXilNI3(dW0 z9zt^-%_C_(o#r!XK8NP>-zrE%m&Eln-$e7BG(Sjlg61b_evalBX?~gJ*Jyr|<_~C2 z()=OK<7l2p^DLS_qxox^*VDX#=AAU}p}CyqN}3PS+(2_P&Br(z9Bs|fbTW_j;%E*> zb2(bX(bG7321n24=y`8#Kf&=jj^4=8yEytVN1x>A(;R()qc3svRgS*F(YHBT%+b*t z9m~-P9G%S3IUHTg(Qi4rfuox^x|^f>I9kEc101d9XcI?UXhF+Kw6vwA6N#4Yv}DmT zn3jB6ifB2FmNRHMo0f}c`3WsQrR7RmuBPQzwEUKqduVx#mcP>S4_f|3%k#ASo0eB- zd54zwX!##4CA5s8WhO1NX_-sQdU2;GE2;jDykj&Qzk zp>UmWk8r>6knktrQQ=A9IbpmoO;{=H7HWjULcP!=w1^<0w}>negGJqMXNVNtG7j~cSWm@M5|9ltHq+#7oyddqSf~zv(+ZiYO83q zL$ul@PP$*5^k;F>GvcIwz4gjdaeA9?#7QgP`u3?fJ<&z9?j~9f6s@yF>m1QKSF|1_ zTHh^N-z!=_AX+~pT8|U0XN%S!iPrN)>xH6qwP<}zv?1D@B-*qQZO#;JE)i`m6>TmT zZLSn;o)&Fh7HwV?ZQc-V-V!HhB-(s0+H4eUwum;{MB7tD+X15OAklWPXgfr-y-BqF zgJ^q?XnVhC`=Dq$RkO0;`Pw0lJ&+PyB?y(!v#E!wRY?KX&Zn?<{=qJ3A9eI=n7A6pIceqQe-`VZ7)tNpzShI?NRv=8Fy=iw>WP4r@e*b)rLJz38w}bl4&~ zY!@94hz`}FL#;?5Qd)_WHX^0HNJ$kb9YsopNa-O`dW)34B4wmVIZdRTE>g}EDd&ik 
z^F_)PBIPQPa*aqCB~oq?DYuD~--(pFM9MuPk@6>z@~BApi%5A~q`W3l-ViBoij@C| zl>ds94@Al&kup`JOcyDOM9QZk<#Um;RHQ5yDc^{cH6mr5NZBA#Hj9)Rk#bn191$rE zBBfcR922RjBDJGP?JNr zNIO-eoi5VO6lv#(wDU#Ug(B^8k#?m>yIQ1OC(?c|(*7XQ5_gNV`$XDFp#o<1x`GM5orGQ@e!d z)KzrqCOY*Io%)GR14XB7(P^0IlrK6JiB6}9PNPJp7e%LeqSIp0X^H6cmFToiboyR& z+9o>f6rJ{nPUWK0A4 zMc4MCYpUp)F1q#qXbQP!c7pDvqr(}y$a>OaOiBldBr#vK1`I9*1 zQE|##amp9slx5m-W1*56Wzv$Zc|0Kg`(Rc(d~25ZK>$CTy$F@x~&r3R*P;MM7Qmt+kVll zR%8$vtwcsUk&z-Y(nZF}5|MF=$mlLIGDXH0qSMaDjnv0r2y6d5%lCP%MURt3k6xljAJOAl(c@;(<5tn*cG2T^qR0KB$AhBBc+q2y=rLC$dMpqoeMUiA8f==Cen>qgP*QPJz4qSr*xYnkX(Cwg}my+?}Pe-yo6620FR zz26bN-xIz6Cwfm7y{C%apNig}i{48UqW5yq`y0`FrRZHMdan_^*NfieqIZSpT_t)S z61{6h?-Q3air&ql_c75YL-ffNeNGd7?ht+cEc%QSeHMs5%SE3u(Px|JvqSXRE&A*e zeJVts1ENp8=#yv^eU6FD5Sgt-<_|<>7m?XrWcCu7nIdzb$jla*Lqukw$UIMEULZ0r z7MYid%*#dQ)gtpck$Hp2yjf)4BQoz3nNNt!r$y#7BJ)|1`GUxNNo2k%GT#u1%(q46 zc#%0tWKI>CGeqWWkvUgnE)bcEMCNBAbGzumd52ihdnMzaNNxr;C2S z6#ecN{azORCX0TnM85{nKSckNME};Jf4b=3N%Ze5`u7q2`-=X=>J>M{|?dr1<`++=)XqvuNMRQivbsi z0e6Z4_lp6476YCU1D+KFUKIo069bZBz+^FCsu(a=3|JrrED;0Nive53fLbx&m>Ae! 
z3>+>7=8J(Bih&o4fr(4Sz{|zJpNWCjh=JFOfxi#~e=P>yBnCbp20knXJ|YG_CIbH$+fV$jE8(5GV1w_?yr zF=(|Iv`!4#AO>v}gLa5PyTzb=Vo-$`bb?5o7}O{RHHoZ~L{?jo)lOt}m58ivBCC(c z>L;=WimYsrHB4mXi>yMCb*jj^N@V>`Wc^cQy)Lre6j>jLtfa{LP-Kl0SrbLpERpq* z$eJ&*J{DP@imXzR)g-bzi0pnMdzi=`A+ifa_NfVx{X>y`mdL(BWdBNJ|3+lrD6(%7 z*?$z-Pl@cmi|l`j?B_)Giz54Fk^P>?{y=2^Ph=O1>?tC9n#le{WG@!kUx@54MfTSs zdxgkeC9+qG?42TekH{_;*@;S#eNbfAi0mUGyFm;3koiCUSa+oL(X)Q{?m)IfF#bV3AWGa?TYw=Zl;RMb5<{ z=Tec#xm@I2D{^iYIk$?O+eOZuBIgex=U$QXsL1)V$azNO{7d9KFLM4Za$XTRuZx^w zky9da#)zEpB4?7wnJRJ?ikw9v=W~&>ROBodIV(iYDv`5V@L#`J?9v4GO#E@xX z$P6)Lwiq&33|SzCED}S$7DK)jLsp0(E5(pK2{EKr3^^i(G>9S1V#qO(8zMJFmuFwiucthUST(!^O}7F?6ID zdWIN!u^9RjG4y9*=rv;K^c+7`jRf zT`h)g7ejZ7p*3RY5iztu3~d%ekBMO+hP4*M+KFM^#IPP>SZ^_`uNXE!49gP3a>TGa zG3`0Q9u;*5Cz$yAV(DBiGtyxpge2{}BaAQ7~E*j28uyM8Q;1uuv2%5(S@&f~BHhxhPm63Ra1N)uLdB zD5wwx4Wgh)6dV_YtwdoPQJ5kM(-WewvncE>3bRCEp(q?F3QrY!Rp?qG-G*nk0&*ilP~!XqG5iE{eVpMJq*7sVG__iq?suZ6Z;$Lll*Z zqDoP8P!!dOqGnNaOpFXMGDVC`7b821kv+u7-eP25F>;6)d72pcLoxC!G4fn7@&+;T zmty3v#mJk)$Xmq7yTr)5#mM`_$Opy9zlf0^ijfn<$kon@k(Hqz}*t*sK?cGn<#{d6{Q?K}cRIb;a delta 33034 zcmZsh2UJv7+l8M|?zJP#tWG5W88Psx|{vlfec_q)%2_bKn0d+u=U5xMrLyh(5TAM>=Q z)Xx&>^RU1C;G=yWZRQ$m9bz479cGl*O-@3rM!n)GB%DURR z&AQ#{vF@<$x87&H-+I(~%=(!1aqA1#Q`Xbg*R5|^&s*QKUa(%Yerf&6`mObf^{Vw( z>z~%YthW^(MN(v?rP50AS9HZtOeI8VtAr~Zl#WU#Wspl5rVLj`D`S)i%0wkgnXKd~ z`AV@eMR6+Am6=MJGDn%KR45A-w<46~$_izTvR2usY*Mx>9%YxZN2yirQ;sM{mE+1Y z$_vUVL4xb~Fx ztoFS2lJ>s#f%c(xLHkJiSi7iwqJ64;rhTQk{?Kk{e`A`x49nVCSeTY6(AEpo2N9ZHgUuz{k;CE{+a%{{)PUf{+<54enr2k|EAwCS{N;jR)(+9+VC^{4XdFT zZHxdT)Ce=8j1ESO(by$#(l>9#sTA?@qls2IBYy<95s#^PZ>`eFB_+fSB(D{uNfDOPmE8E&y3HF zFN`mZuZ*vatHw{p@5Z0TEz@H9n3CDdY-ReJ!Dfir)(kbn%ywp^8D%D#NoH3w*-SCJ znZ3+(bAWk|Im{ewW|)~~mRT5M7MaDS!<=c(F&CRI(`~LYSDS0hEoPOu*F0z*HXkw{ zHIJId&1cM$=1Y~HR_T^Al^?d+=~pDAsX{6d(t06n5Yq8uOME9;e0~#>e(bZ>-@l~4 zFMK`{KA#GoFNMz~;q$%lxgvb79sBIOLzOcR4qtXG`q;>C=ULihM(?}S<7{R{TwG$D zJv1&cA$ifniul;1#A9E4x7Q*UtbIP^)t;4We)!ucSbO5JA7@Och_#1yjg4P4u_7@g z-X7ZZSoP(6DX_S^GJE^(D@e%{)q(B9?j+i{r{3318Z 
zG$~1SX;KmrkDdB?Wn-gY{qLtgZ&jzAl#;Y)LPbJsoINzQvf%gba?$H8pY%UpdEob- zY^IB-umblDo!MK6T^k=++Mk zpLd1Nd&0+0__PU6KVGo*>dZcVrLuM{`iC_1GDSZ8@fT9sR8Q|J9F z;q!Int-qogFSH78r27)-y3f9e-g$}Zl8{iZo$|lh zYpt8S3PGE#TZp!96+S-*pUZU$KY9(WZWA5&c;)e%VU2fL@AdYy)gEhYy@{*B=cg@y zw`+XRdc>=sJY;>CXzQcG=V#&bOP#{6l_UR-9P)(qS$Eog>yy@}tWR5?5k9{QpFf1p zpTg(ne(MSAbJmmA=Y`KLA^8ZYxsd$4W1mAVdDfRGa0dvV>u%1`j3SDy|EZjQdsL-% ztG{xlK6axa_GV?ttxd|=di<{j{M}0b+XIyk>hZrD@Q*8}-`?T>sr7SrTCMdn;d8sz z`h}42T(elhM_Mnrvumy22+8se-_wlC*6*y}TYs>=L}a#~KCo}Uk+Y}fJBE~&I!5Fc z7Y>^}J+H7#NR5Ql*y}edJpFj))TEfE>(4O_hxI4xwQY}B+DiMZKU;s<_K+pr|99&j z-n7?+B-L7P2uZF>8)^O9oA#EFn%qsRVEsX9M5K_KdDW$+?ySlgx0ZR{uvirTrb=^n zTCLJdNG)oW7D8(2m5J!k+MQji_z9_1gNoJqgQ9v>TGy%gy0fzVx>bsccF)vCmN5SS zCCDojC?x+{C0Iz-I-&MqUZHkEQW}IJtUoA`ULmbcNOfmbmfl|HnbFvy47V$(?zD%L zXeCDJti&pDO1zSwbWswOB&Dm8tfVO2gk%V*jgSI`6fC5+LJAX7xRBZlDN0Bkg=7;_ zw2(RrDefU9&GU9+OF){C5`>f_r0zmW7g9eV4HVK)A&nN&I3bM}QihPSgfv-5`FB^2 zGNf*{CDSw0$D(*reJn9eN7h3DUMRikxca-GdcqrCLPk9lQV;2pC8lXkJrr6GWqYB* zdZ=AJbkGY;tA`@$p=(~Kv>uAAhq}p@n5O0RP=|VGsTZ1G4|S@CUiU(a>mhqRq&2a` zG+kN`#dsmrlikD;-E>tw6kE?a;AO3^hvMs@pPE=={I@7uy>nVAq%O6}HX$X}&1u`6 z?(98x_cx_VsqvDkh19iHxmQTZb)=Ae-ihBYq?C90Ub$a6;4J|PTHmW2QVuI4J&!fD zv{SnYDNRTL=V_)qq&%!VVi~SH>iNE@rDH~_pIo=_m1D|d8N)^oEGa0QHzK!u%3kGh zO8*Jevz}0%tS_n|rNwPrp!zboF|<-aq3Lr8<}=->3}|1G4!|I$}2%3-CjPJc+9 z{dWJ5S7on!tGn{5tC`y3u79v z&Mwc*a~67=?Nh_la7xDw8|}Soa2%<2xH}+fM5W7zIB94(}=|2FI$kkWq)NMwUp zl1oj#+x@!2YKWz&JsJ}A6w-t{iF$hz^%2sGM)Z8@ZoBLn3GS$iMv|2SwNZGY&wvckXRUYnA^S!PL zgp~VlS5uV3%Bnh7d3CO0|LJPF*H!ZdS2NVIyNT6VLMph^={a8cxk4)Zx4gI0>)nyw za;)!le1n=>UE=jBgj8IsE)~)g&$F#8?E+V+Yu#yk)z#`6Ax#sKW3ReST`wf3kfwWX zw6f&2->U9#r|na>soPbLkV=I#Lr63CsXNtO>TV&G32Cj6)_L5%meN7B?$rJ2UUi?k zU%gLAvxKxjNQ;HEq`|~N^+B)L1L`65u#n1yG+Rh>_NhnIht!9KG*?LTgfu_fFVZto zv4nWWx3(;5|D^it|JUS%`kZ=FNEJd_D5OQ68?7y&>8I3J|3B&(^)>Z%A-RO)7Lo|> z`4e?N`GmG_MChGTI4h)4PfYT&Oly?(j{1>1?SAzE^*!}{^|1P(dZEtiQXwr9(i|Zz z7t)IR)sHQz`ic6fMYX6xTIrrFq*X#%vt6+qN&l8+>UZk*-r_2sI=wJF{djTOw?|9H z2x)bFg4DV{5e#c=)NPOYW2yK%$#T~}|r 
z(+;SAs5jIlNXNXgbx(v^v-(fDO?RK-v{vpm*`8&prB_qG`q&oF>#Ai`bFKbvtDDNt z-tE^y3#h-}?xxc3tFX!uVTR`s&0=lUwjST%Cfl!faYq@GdXhrw-23 zElOZV&F)U#t98n{blCH`ZV3t;r%m({ z#(Ncx)G18TGKBPykRGmA$kht|<)Kh364IkWI_i1VumlAx!OE!zP3QC&=zWow8ffBNKXjqDIq;0q!U6qDWn&K^rDbn7Sbz1I^8f! zOX>&j$%a{4;Ss8(kGZ-&@N`39y}M1eUsvyQXl2tcRaM(m&v>?hv9)sgS8CnYi|hYH6gt&q&I~0rjXte(%V8hE2MJ=iPqlM z&Js;)?F}Ndx&y0pUP$iqI3v5+nvB$5d2XYCj5 zSM4_mp(^r?`(7V==XpOD9SZUtFhb4{3@JGCSxf5wcv$@Ip$d5jL<5J)nW@`Y%1Gj{a46uhRde>w2i((Vccs57XP};d+GLUXRqH^bSJ0 zB&2VI^sSJ-6VmrW`awvSh4kY=y_0U!?RvBxqj%P0^*AA25z-$*mWA9z$iYHxC*)`$ z_s#YTo{(Q!;5i*)xji6F@9R!Gpm*1M=sopbdb-|Q?<1tELi$Na*M#)5kbV);uR{9m z01-s%1Bl=?eGt)ZKkrdl`rYm4P7=~}k8h~u=c!}eseA5>*aUZK!}GeHq33(kWa?S^ zWIbEY(R1}YA>9ztpF;XeNH>M_w~%fL>GlC4iEdu37wc2>sroc`mN!kKmxYXwEkbTA zWS?xmaL>jti|%oSS$=Vp)66nVpPe~u^r+mD;=(?qvrEc_+^FtzK%cA6oAlpDw9$p- zd-eI0{^Q=ya!#*rzEH?geT&7qE8EZi?$cS_8DMl_`99sP3(v)NmSMTe^^NYd2lN&C zN_~~ST3@5D)z|6k^$kL9D&%HDZZ6~<)MKRzmg_a%&;`JwUX+S>K{p60L93w-c@J z@ODe~7qV5zYTdOa#5Gp0sR3=#||4ky0E-% z#P$99gY|sxiQ)zQVRve+{)mwEJM(eO3p^%dqhUEdp+D_T-m5>UKP6;S$ZhsEU#CAS z8l4kzz^Fk}3r3aCA6a}y{KdQC`hNYC_vVg{uAkOlcc<;sU)9g(uL(Is$Zdrjx=(+@ zJC#BX^Tf8dWufM0iuYaI_DCBS`C8+Ub{YTGh9WCLmmijgQmj=eKLbf;9x~~7> zPTTVjpMM#Ry^HXs{5OS9Th7Zw(Y&0R7XrsB1 z6YG2>3Av~D!aH@nClBn~&vP@<@V)|=_;ZCkKbRl=GHB2EVdmRV5 zj36V#omOiE3pu6MXe;Dy6Q`Dx7nT*}<`>>|Y=j%_y`%^sr`8&gLQWeux^#BgKN+12 zyO&`Ta`##zTF5=_Zek>r=r8k9;d)y(lH2NF&G-wYLa-TaP-56pF{h#qRMi^t?7J3XN<2K#e0N2$kVx_WpPNBk$ra_jT|8lY|za&3cR!3-af^cc6WC+96}!Q z-?eR&xKsBTrN#_nrjUmUd4!Nhd$KxN!hD7adAKLNlO@u#uam{-HB!ix}h69wX$jp65DQBFAnqcDvIK8kNRYW1F$v z@EALcoyIO9j~DU;Ax{+YBq3)AIaA14LY{n(Xrr14B8@#pt+ChGSJ&}ux1T#p$jgMh zUC16G-|I=SSq}I=WIW=Xori^-Q)@gba{ih@An1XH`m#IfwN~6~~t+|e7=6Z7j1?DDmv*)LH z%aG7L-bp_!5xaaLI z7N4ej%@@5#;RdLA99U8wvsrMq%6O(9B8^3Cv@$^fyM5XVBR<+RTA!zkD zv^oK;PD86R(CRH{brxEkhgKgzs|(QTBDDGyd;`I^1Ne6GJf3K=xyFL;c^_n0Dk%4w;ud< zfnSwpeUhcE>lyGn0e)|S-`n7K4*cE$zxTlJL-6|;{5}Q0FTn3A`27X`jlo|6|0dwy z4E(L&9|Zm(;9mm%bHRTJ_^$>3YVh9={zt(7IQX9g|5M<92K?U!m;XiZ{}lW`2mdd@ 
z|1$Vr0c$!~hkN}vm2kHl)UI6uDP`?57J5YZB^+!;zf_e?KFc)Y;K&t@l5NH=b z`yKSgpi7`P1-&`wtw3)LdN}BI&|^SP06h`(uAuh?y+7!KK_3eGaL~tqJ`VH=pcjEY zAM^#FF9m%a=sQ7o)qs8&^dq1j1^qG5kAwaU=qErw3HoW!&w~CL=-+^T74$#A@C8Ey zBMyviV5EW36O42)MuCwDMiCfgV3dO~2aI`OECAy*Fx~-|@i`b@g7G~Vm%+FK#!p~e z2jd19e}VBgm=c&R!Hfel11bA2yMnfn+4EjDYRMcIh$gMay~4-U^oO!fPhI5Fc$*c5U>OSsv+Qh2sj7^y5Yi4p zA|NCRLOMc7Hwa0CkRA}y3qpEB$TkS6fsn%xas)z-LdatfavVaQfshjrauP!R10iog z$afI(JB0iJAvYl8PiPz51ll%(wkovMp{)sR1E6gXv<>n2r&$zN9JEb zZ7)M;5QHW}Xg>%Y0HK2*bO?kFhtQD_S_q*f5IO@wXG7>*2%Qh1DFa^Ri2s0rp z0K#k#77byYAuJBU5+JMs!j?hUW(ccO8L%Anl_X!kg@`v}_o z1>vC(-VwrWo}Jw-ZC!&Pd;ajBtnO-&>;mnq(X=8(4i-ED1Z)b=x_i!yx|r2 z1v)CwF$g+_K*zSwu>*AM80=6))MSv{|Y@NUs4YpXYC4emnY$;$% z16xn9^#)r%u-yZ;!C)H(wvk{P1Ge#Cn*_Eju;qX)A8bWnn+i53*h;}x2DaJYvdsfq z1=tpYO@M6~*j9pV4cOL$Z4=lk!L}W2JHb{3wtK<07i{-|?I73=gY6-(JqotR!1e^# zo(9_qussj9m%#Q4*j@$O>tK5eZ0ErCE-q{zfbAo&eFC=6!S)r{z5(0!VEYklKY{HR zu>B6U8(_N$w%cHD1a=ARO~Kv*?7m?42fGS(1MC4{4+eWE*u%jd3HFX)w}ZVi*yF*T z=mL8(*i*sY1MKNw?+f+;U>^kbp?%2=*ypcYwVF>@&e$ z4)(cVUjX(+V0VLkDcDzleKpwEfqf&`w}9QX4eUF>z8mZ{V6O%Hey|?^`ysF&0sA9h zKL+;WV1Eki&w~9V*k1(uYhZr^>{r452iX6FXe&eqLUb@hcZKL)5ZxQ1r$F>fh@J(} zn<07^xT32d`bmg>0is`m=uaW~TZsN1VyqAo2rWI0NDmA+86+ z^@2DD#Fazb9EjTmar+?dJ{QEDhPbm3cOK$yLc9d=O&~r3;-ewHGsF*p_^}W_9^&Ug zyny(n5WfrJ_d)!95dQ+ipMm(-A^um0{~O|OLqZfJ#6m(mBn*Ru@sKbP66QmKYY8MQ zgM`D7@E9ZmoVuvm<&}Are83$b^K$kht#RXjiblD4C z4nvnC(B(hSec_k#ThU7z#d<>EwhvbVcNWKKg-$F_Y zNKqk0hm<5p=?N+6kWvaM^B`pbq|`vl0Z4fOQeK6WbCB{5q+ElPKOyBNbc=>=Nzg4B zx=n^|#n5dkblU~p_CdG%pxbNE?H%amdJnqYfYe5i>I13KkeURk$&i`{sSZe;4ymgk zbu*+^Lh93y`Vyp`g48RJdL2@4Kw1Q(MMGL=NE;4m6CiC8q%DKA^^mp^(w=~{=OOJy zNc#p{X+J^Q&(Ph3?qSe99J=>|?!%z_28(Bmgu^z?_G0njrDdiI5$ zL!swz=vf9mE1>5h=(z`aJ^(!rL(kWt=eyAJedzfY^zwmTGV~e*y+%W?vCwM|^m+h# z9fn?)q1SKF>pGJNkF!=NQFXc-JT34>0LiSM0Y*20(S9)63ZqBC=nNR01*5mX=-n{78b*H&qp!f| zpI}Tnj2Q%DhQOF*FlIfB*$89ahcTbQm@mLJHU!2-!Pt&4wgkq`g|YKt?5i;L9E^Pj z#>K(7ZZIwl#%+UfH85@ujQbMCU50U2V0>2?-wVd~hVjc`{011m3C5p?@gKwZPhdhY 
zOo)UD9biH+OqhX-31u+hA(-$4On3?=+<=LVV4@F9>;)4C!otNCbnDiD*`T!hAoD%Q{1P(1hRn;5c@;9RLFTWJ z`4?o~0#}v=vV0(`C1k}xRw`tThpZgP%7d&z$eIFKvmt9PWG#cN6_B+GverP>I>_1p zS(_lM8nX64);`F(53&wG)&r1r7_uINtmBaN8f3i*S!W^Z9b9C+4_Ox=>mp=*23cQ1 z)+NaL4zhlRtY2YrOPH*|WCJG0!Q^f*ISnRHfXTTqIUgpkfytFHc^gbV3X`9K$tPg) z2Qc|_nEWMVhe38H$hJfFbjY6Lg6w&a{RCt`57{q5_C?6P1liw0_IHr|J7oU>**75j zPsnKkIn5wPg&ZAnOvnj&^NIa?ve139jp zkW&RY_d?D=$T_Mf&X16D6>@%s zoZlhm56Jl&a&ALzBgkz5xlxcC3%T)-n+UndkUJ1uxkDg#803zG+%b^54{{%b+@~P- z8OVJRa$knrS0MLQ$bB1f&qMBekozIz-hwAG?_tP03VF{!-U-Ni7xF%Vyw4!-Psnc!`4Z%ZL4GI5w?qDD7vyI`{$$9n zfc#~UzXI}WA^#BMKM48XL;f$2{~Ht}K|xO_NQVM96s(4VwNP+B6g&(Ck3zvwC^!iP zFF?VIQ1B8IybT5Cpx`1Dd2}R{lv;>N_K~W799fqPKP;?ZE9*3eQq38q@orIznq3AUz`UZ-Af})?H=r<_( z1B#nLaZ4!ng<^jwR-w2Mic6rl0*V(w@iHi00mZAKcpWZ^E1`Hh6z_!MDk$Cu#iyb8 zD=5AK#n++u1{B|f;@dE#B~0;!DS(_AoUHrgng-aWFLjracMMo`Gr4!L%1(+RHHQ zKQQeKOnU>Sy$#dO!?gEc+J`XhW0>|SO#1?+eGSvTg=s&)v@0;}8ch2Yrd@|=f5NoC z!QlcNjlm&!Vc-}EjxpdE4~|LT$O4xm2ORm}C<4b+a5%wH3XU>x%m&9ia8!U} zF*pP`mVsj>IM#q;JvcUjqY@n3!Lbt@Rp7W69DBiWA2<$z<1jcL0>`7^cnlm*fa7U! zoWO>xd_#7Nxf#Vx+d=HKv!SNF~egVht z;J5*fo8Y(&&PL#rz}Xa>Ex_puPJeK!;55J)0M1|+I77i14$eq$b_Ay#oSnfL56(nz zCWA8-oISvq4$i*d901Ni;2a9h5#SsR&T-(J2+mA!W`i>ioQ2?=0!{}wOTal3oaNx0 z3(f`LbS(m>8=OnQxdNQ4!MP5c8^O5+oZG;;1Dw0TSp&{maP9}^0dO7y=Miu|0?uRL zJPyvM!1*jVPlEGBaGnC^X>h&<&Nsn%7M$;Z^L=n$0GIP3I6njDm*Bhv&hNl^8Jt(a z`7=0w1Lq&${0pZ0z;qd=cYx_}Fg*dL4~FSuVEQ15k1pO5TN%PoU&8 zC=G{FJCw#i=_Dx4htfhQ-36ulp!7Z{Jq@L2q4YeI{sl99V1^7cI>C$tnBjf@H=_V% zOothzFk?H+xEE&B!i;BN#>+6{6`1ig%(wzGeu9~;VWtT)17Kz{%uI)wePCt@%$y4| z=fliun0Y_UJP0#igPHHZ%=cjCZ76FBWzC^13d&ruP!flud%N3Mg9!Wh*b(jUrY6P=7!K?(Bl?by&!mLR!D-&idg<0!h)&`jM0L(fH zvmS$4=U~=HFzX_?X8i@_K2R=0d3z|2f$~@=9|q;)p?o5gFM#r;P`(_>?}hS%P<{x? 
z&q4V|P<|20Z^P`SFuOU-?gXba?S|Q_VfI>>{WQ#e2^X_Z!R&8f z_D?YTXPBeHoM4#K7Um?woOGDe2j)0o&TN=77v^k)IXhs^E|~Ku%y}B-JPUK)gE^nV zoX=ryGni|Ixhl+!hPg>FHyP$mgt>Vzw*cm@a>3lqFt-xsJ_d77z}%BC_d}Tb19atd20$*6*2Mdy5K~Gqa z4hza)K?N*W1Pk`Tf(K#2L$Kg1EVuv*K86YjDtw{B4=TDrMIWf>2NgNss+bBD4yf1y z6}zFL8Y*6ainpNREL7Ztg%T`m0t=&HVJs|+hlQE2un-m&!@`ZQa0e{h1q)BY!qc$u z3@rQ!7T$n`f5D<)SQH71I>4f0STq9`mEmI1qp;{{SoAC`x&(`^!lG-iSb@bsus8%3 z_kzU(Vew#CTm*|tVew2@ycQO3g~i)p@d;S`3M@Vii@$@#Kf~f*!4(9q_TY*F@Aq@A z(cV)pxaNUNc)z4t3aN6hb3)cNf<0Cg(dS~$pTp7It5GKge7moQW=)EhNb?n zbObD&2um|yX*DdpAC?}3rC-9*%dqqcEQ^O_sj#d&EL#rCHo&q?um<<;TFa{Bc-*9aaD<8o`Qgu%ZvF=m#qn!HN~IVil}-0al!W z6|cj}matNTl?JRF2`eYT%1l_f7gipIl}BLZ6dmma5>~$jt3QC%7hsJ7Yl2`+2&@?iYbL>(OjzTA zHG5#qURd)rthoYfeuA|fU~L?%O@Os?V66+*3RwFBtUUv3Uw6T}matBPbq1^(3hTzf zx(TqZ7SQHrB$%eX#K|Z2S#2UWZMou&FO>>JOW? z!KND6vE`TRw#?Uc&cK*$FBWpfV9Er$OZ`sGJRzC!q2bs5}i@+rZX#ur&g+i7jHf*!Nw*IhfIBXjU+iGFkA=vgHY;OkJt+?2(!uIK~eGY7&2iu>4 z?JvXjSHRO6JSKPoz>@`@BJfNB&tdR92A<>K`3*d`U(}bO2urnNXdJmU79k6pc?0gt@J_$RYhFw0es}<~O4ZAX6R{`uQf?Wq- z*CVj&DD1idyRO5o8?ZY7c1OYPj~_HJ64+e>yI+Ofe?V0fRE>wKSy1JMswGgh9I949)mo_909AXT>M&Ft zfvTfW^*B^L300?{>OWBRI#j&{Rp+4UeWLq;$tc9O>sYp zvnkG{co4-yDK4P6h~ncZK9S;+C_aVa(iCfSZ;8p#eMJCW>4vKz^sB(q5NB{_g( zj)Y`B$sr_%lN>?vIFi34`D>EDA^BU9XOKLbn{D5SVgdP;7k$EZIt>Jdgg#i(Z_jGD@* zxr~~}s11zT!Kht~Zpr9$Mt5NJFB$z?MxV~;XBhnoqhDk6LPmeV=&u<46Jy#irX6Dj zGiD@Xj%UpEjJcIDw=?D?#=OOtcNp^}V}9m8W6BuQ$k<~T+lsNh8Jo-4e8!%`*h?6D zDP!+t>?4eQjIr-Ab~IzhGPabl8yLHZalyD0#-%ZC7~@W0+=-04fpK>*?yrn{fpPy~ z+?$L`%w*hr#w}#r7RK#i++M~9<5L))#`s*u7chPVs{EylmY_}PqK z$oR#K-_7^~jIU-wIukOO(2WVlG2u5%IF$+4GvQVe6K-e1b4+-R3IAcj3?_WSgau64 z%!J)cC}-j^OiW{9dnO*o#NROSR3_fQ#5~)OxeYh{Y*K?)b32}$J7B#J&UOqG4&Fr z-ow;~nffSGCo**wQ|B;sJyR3gnYxo{t(exHX&sq1glWey?F6RX#<)3z|}N2a%7dPk;rV){s?pUm`An0_16?_v6VOn;y0W0^jl>7OzETc&?UVtOsp zk1`{ekh8a!F_=%bQnK_u5Lz#Ig zGp}Xl_00SyGoNSXznQt5nX8z&hFM*il_g~N&a2GX$ef+b+093}d{n?kBlsx6N6+%n^L+F* zAC>XZYUcK5?qKE)W$r`F{TFkeVs068H!^oKALsFLAs;7-`1p1{zL$^h=i^y?ynv4v 
zF)x^x!n`!*oyEM1n0E>D-elef%qwPIIrFNScZg34`Q%r8auT0B$|q0r$+LX&IiIZH zlkb_|p81)~@6PP_JFxHBD?FolocS>3o*9VrhGpc4X-lEd3KpZ(!+S zmVU+3<$TtO&wBD%FFw18&;H71ck|g?K3l?POZlvU&s*?$OFsVtpI^u4f8_JAd_IlO zXYhHViDj)=)`n#lvFu8gUCpu&ST>Gj6Ij;37cKarC10Gu7Z>uyMSSr(U%bZ`@AJhz zzNq7i2EII%FVE%6^ZD{szI>N2-{Z?_zC6O0&3ttlU!70ls|)$+CBAx#uioLS6@0ap zuh#K(U%t-g>%n~eC|^I#*U$3xV!r;0ua~pDGs}-X64JQ ze4CZ;vT_b9m$LG6RxV@Z7pz>x$~BapO6lp8CeEVtTuLvX^dd@sPw8cpUPEFA`WmHgQ2I8d|E2VO zN{cD|8G4nDrE~(NlPR4>=}b!JQ2H^Y^C?|K>8F%_PU)AFE~j(_r7J02Md?~f*HgNQ zva=~WkFpCXyO^>|DZ8Aqt0=pcvOiLG17$Z;b{l1PNGQ9DvU@1IpRxxjdziAvDElX6 zPg3>_WzSRgB4w{o_Bv&6QuYpI?@{&vWl73LQ#OvWiIh#DY&vDLDEo-Ad6X@nY%yg^ zDO*O_SCswCf6Bh2tdz3Vl>I>22Ff;5_9JCGC_6}54P~{I)w3#CbquRcX4QGDx`0&| zvFZ|5UCpX%SyjTSX{?&Ts@bgih*gVO^(m{nvbrCu2eA5QD!Dq3)rG7sV)Y%Y{s*fc zX7yvNew@|MvHAs8f6nSsRl{Nn*_k)@)$SCf00a%{JClu%?Q&1*|=pwWqN5G}fNM+6!5G5o@1k z?OUvUhqdpq_I=ilW$k#@ZeeXDYxlACAZu&*;aB`{Hb0!p4;S#m@A%;@2|tYDhcWyx zo*yRiLk;V&t_AB_v91m4In>y64XnF~b+@wacGk^c-6Ga4Vcln}TgLkKtnbPC zUaarK`hKh*#QGtuzlZe+*8h|BPyT#(n)NTU{#Dlh!1|r6-_82HtgmGKVb<5N;dnNj z&W1DDa1I;JW5cCvxQq=ivf*7eyvK$Q*ig)d32d0ehH5q(Wn-|hB^z6_u>%`_!N$bd zZ2SWoFK6RbY`lhzH?i>+Hcn*YTsF>Q;{rA=V&j)={F;qN*_6hn_H6nEn>w@UST<#` z=>|64&8BVb7 z&SA@WZ26~zEibX<6}G(2mN(e)K3hIy%UZVVV9PGHl(VIREr-}r%hsW6{S{kJV(Tev z{ViM1W9tQM{TEwbVe4ybeS@uUvGqf?CfT}yt$WzIm#tN7-Jf7ZMI+I=Ju-{>!favFk&2CD}ERU6a|hlU>ykb{%3@9lILX-GbdM*?ktfFJ<>-?7ouSSF`&@ zcHhkI|FU}=yC<-FCcEdb`(t*`XZIp@f64CU>|Vj{mF!-{?)~h^V9x;doX?&=u;{i|l!YJ@2sRJ@$OSo+Nulvu81TcC+U&<-I5$NcnKe zkEi@}%Fm?yca&d3`5!31g7T{=zmf7=D8HTZJ1I}xP5D14f12`FDSw~x4=Eo{`6SAx zQa*$7*_1D&dDqf=EZ7PyfOr>Hv74xWAK*eGzmQt~ditnf>rD8P|KTxrO ziXBu`QBj+q@)#;xQQ4l#Ur^bF%1kPIP}!f#fmG&EIhe{}RGvWP8C3p`%4?{+j>=o8 zyq(HBsl1!Yd#QYc%AfN;LFH3aK1=1RRK82)C@QD_9G^+$d@2`F`6-p3Q~4#8E2&&X z2@RmW1*o2q_PWmA<)RRL8eQgs$p=TLPC zRezxB3KCUUQ*|9xw@`IERd-T#H&ypi^$1l@QS};C|Do!CRDDQQ2~}gL8c)>>s%BF) zm#R;wT1eG8_F-Rp_8rH*->~m|_Fc)otJ!xW`)*<1?GpCg$-cYU_Yd|x!oCFio?zco z?E8>?U$AdA`Jv` 
ze+&C>WB)zuzn}dNCfNTl`yXTfGwgq!{V%fr752Z*{weHV&Hn8iNaa8m4)o=~AP$_s zffG4!8VAngz&RW^p98<+z!e<0ngiEy;7=U5kpl@1jN!m64s7JW&Yyh;IdFu7Z8@02 z!Ok4a;9z$S9?QYr9L(k5APx@YU;zh%z!5=xe zgM+&{xR--f9IWMF0|%Qpc$7qS3#wD8?oRapstc$dLG`bx{teZ?rTPr2&!+lfsxPJb za;mSQ`dX@Qrur_bAEf#zs-L0y6{=sS`c10eq53_lM^inH>WNfOp?W&i^CVP%LG?T;?psoqa@HPwfy{)w8PrX@9Ps7axwGd0IjlTA$_HAU2%M9nGGoJP%=)SN@j zCDilCv)gj z4xP@Svp94vhknnY%Q$o;hpyq!^&EPLL&Y4L%AvI!+RCAQ9BSZjYYw;N@Gm&rg~OQ~ z?!n<+93IHwJPr@$@GuS+a`*xc-$&x`6C9qv;h7v>%;B#&yq3f3IJ}L+J2||E!xbFf z$Kg5-H*&a{TGSpxZ5p-RsLiG}huY!Pj-d89YJW-XucK>==U(`KK-OJRyM%^3Ky-nSJsT)PzWa{Qnx0JfisY|S& zZY6cAs9Q_jdg``Qw~M-R>ME(*PhCCrEvZkZz6bTkQlCwIF7<<`A4+`z^(Rt)67{E0 ze;V~?QvW;Zuc7`H>hGie@6;!#e}Y8)Q`A38{R`Cphx)guf0z3IQU4+Jg4cE|cD-HM1kf7n8G(1nki!{7K!|OD>Ny7&;Bxx8;!#Ely(lCpL zB{Y0Z!&(~F(Xfq%oiyyBp@N2eH2lo}8yab7rV)+D(3nPJ2914a975wT8c(3{S2UhX zBPU8hMUPR;XX}pZaD{1^Qjd#)b5RK2!_#BO|(f9_9Z`1f+8sDdJER7Rr{Q1*S z<1`v)(m0>S&uJ{BaTSf5XxvKUb{colSWaU#jfZKhr|}4lKXIfDM>=w(J4Xg`B$p!v z94X?+@fOpctxk@Gq7JC0mJ#*wQzauY}H;>aT$d5j~^ytVSN=zP+99QlAF zNsf%>$T*Hn=g2~ie94j399heetsL3TkzE`q=SU?-4!_km5uFn$G{JW!cWZ{ z#nBcVP334hN6+Qx=O zh*L$JCgKYb-;3BJVvC4vB6f<{BcejIXd_y56fHW57F|V)ZlXn=Xfar{7$#a2iWWtp z#oxqLEgljro)Imc6D|HNTD&Y;ye3+_AzDlqE#`?9^F@nAqQ$48MVV-^MzmNbT5J?8 zHj86!7suQyj(J=h^Mp9&19439tyWLO@vU}=W6Iw;^@%t>(Mz=KEn4;yEeD8}SBjQ5 zi%O9Of6@91(fTIQ z`WDgpcG3C{(Yjc)o+4UL6Rl^8*0V+Hy`puUXx$)MH;L9ii8dodn^Q!a--}jD-xpZO3`+eXuDRl-67i6i*~7^U3<~4qiEM%w96Ll@a3aN|8u8UZk8T zQqB@7=Zci`Maq>TrBbBq6Dj*eO0`Hi zEK=%4YAcc2R-~qg)J&1uT_RGC6{)>NYCn;hEmDVy)Zrp^gh)M3q@E{IFA%90iqwlm z>ZKy}a*=wYNd3D={f9_>M5I0@QlAm2FN@UIMCuzN^?i{#Numt%JL|S)|cC1M2Ez7BPZsIZMEXpT zK1ZZ~Cel}k^bI0?lSHI%6X`oe`W}&9CDIRw^cvB=jcDInv_D<6KTov3LbSh1wEvT6 zf1_xBi)eqlX#Y3S{yx$E0nz>;(SEXM|D|aEqiDZZw67HH4~zEoqWuxk{wL8PM2GgG zL&t>ZkSRL!5FL7n4t+$29MK_PbQmH!3>O`)6dj%s9cGFS^F@b+qQhsR!%ESiOmtW; zI&2ahwu%nhMTfnjLzU=oKy;`P9hyXkqoQLw(J@1G>?S()79IPEj)`p1F;{dPCOQ_1 zjw40K6GX>TMaQ#6$BRYBOGT$yqSF%5X{qS+z35aXI;|0%)`?CVMW-#I(>Bp*zvxsg 
zIvo<7>O`kT(Wyyv`bl)|FFKzsI^QEYzb8767H4*zB0A3zofnDDpNh`QMdwn{d9CPN zAv#xy&UK;-(WQmx(n@q`Bf4}ET{1+MY|$n6t%B#GXRC8Wm-FAc__^qr_>1Urr|5FG z=yH$f^04UgsOU0FbXg+0EEQdri7sD?uI)wFo}z0n(Y250+D~-7PIUc?=z6EbX_F6E)iWn6J3{yj8u`)O=R>C8NEbCZ;^46NMzh2GVT)@{}36Eh>V2Dcv56M zBQl;B8LxN9 zO(HWyW=oOTMr5Xm%nl;6lSE{86PbNQX1>TgUS$4KWS%TCPZgP`i_CLG=J_J?cOvr& zk@+W)`B#y7x5&I-WIiY|9~PO9i_CwC%%?@>zeVO7BJ+KbIZ(S51t zUN3sI5Y}<5AHgA$mM3dORmK=d3adgh6qgCwG7k?47x==q@N`IPASjOh8c z==opK^L^2?So9nvdX5!6Cy1VtMbBxX=MvHLGtu)4(erE3^IOsLJJGXL^jsx+t`$A2 z#jzod%@)U=DvrHd9Q&F$c8WN5zv$I2A$oNZy}F2AnW9$@(JM#v$`id#6unLoy-pFm zP7}S(6ur(7z0McCekXceB6{5*dfg;?-6DG3E_&T5dfhF0-79+CFM2&Fdc7)ojS{`4 ziC$YpuNslnPh^c0S&7p`*7+jqN|AN7$huBs{YhlqD6(!5S$B)9dqma)BI_ZM^{B{t zUS$1SWW6e~{v)#95?SwwtPez1Qe;gOS#w3!Jdw3XWPK{KJ{MVEi>z-&*7qW7oygiH z5?OmhR)ffD5?Mz@?-rtWE77~H=$$Hhw->#CA$s={y|YE{T+w@w=si^QE)czoMDOE8 z?-NDut3>ZdMekQd@9CoV_oDY<(Wjf}bDZdNn&@+e=yR#)bGhhql|=NpR`mI!=yQYU zbF=7ko9OeP=<|^1^Qh?axajkQ=<}rL^Ni^8yy!Dc^eGj6c8b33MBjm;@42Gy-$dVs zMBhh5-$zB?r$ygqMc?N{-#10yw?*HvqVELJce3a^Eg|~O6n*E2z8{Oe^F`m~qVKn& z@Asl_ndrMl^j#d^zdX_JLecMT(eGW+Z=UG)qv%&I`c;d5M@9b@qJOIB zpCS796#WN^{<)%mf#_c(`ky5FCoUHKuMqw35d9wz{ofV+$BO>rMgRGt|02==)f4S(tLiArL`mYlG*NXmoMgJ<%|A6RUBl_2h{tcpkljwg`3}_(+v=RgQhylaI zfYZc)%f*13MPk5XV!&%+z!WiHsTi=FZZivb73fM${1Qe>xy?9L)PLu6-) z>^>s9pUBP@*|{RSKxB^)*~f|OUyAHAMfTYu`vQ@Dk;wkN$i6~kUoEn)6WM=~i0m6h z_RS*uE|GnY$i7!(KO(XdBKvWX{i4WzS!BN{vfmNe?}_XWMD`ewJziu_6xmZm_E#c% zs~AWOOcw(?h=JY2z@B1YmKfMq3>+W^4iN)~i-9A=z~d5P;4j6%^Tfb=#lR=Tz<0#J zF=F62F>s<7I7JMcE(XpL1DA<`Wn$oJF>s9-xK<3@Ck7r71Diz-k#mg5X)SWviJT0P z(@o@b7dbsdPL9aQ6FDb{oL`BY#K|J(RFQMK$T>^ooGWrJ5INU~oa;r-pGD41BIj0- z^B0lxSCR8Kk@L9787*=?5;-eGPL;_0g~&ZcBCoB;OBH$DL|zY(*GuH}5qbSZ-awHzLL%};io6p= z-bo_w6p?p^$U9r)ohS0H5P4ULylX|?A4T2`BJXCAcdy91U*!E=a`$orqj`%vVSh`cc(?;DYKSmgH*`8gs#FCp>=i~M0Czfk0l6#2gu`KOEg zGerKGBL7;Ef4j)PL*(Bj^6wG(_lx`oMgBiU{=Y>2^CJI6k^hRwe_iB%DDsmcf2_!# zAo3@R{23yDw#c6=@|TPJRU$v}qsZSb^7n}R3X#80w22gIO<#GpsTpvT3ae~Cd) zi$TwcLGOyhp#OJV(>IEc!3zaNFoL= 
z6NA4JgTE1jzY~Mkh{5Z`;0p??xX?-N7+C5FB!hRzT}XN#e8#n4a0(1l{?VllK@3_T)- zHj7~`#jrMFSc({yE{0`@VSUB00b*E=7&crCOZ-j@yHpIjR}6dPXNMT}gc$aM81|AF z_No~6UomW)7&bu+n=FP+6T?0h!@d*4R*7MI#jqMNJWCAkFNR+zhF>j)Un_>+EQa4E zhTkEE-zA3MBZfaLhCe2T|5GG}KPiSkBZj{shQBX{j~Bzg5W^3M;SFN=5i$HHQ4pe_ zn<&T@1v#RiP!x<51t*AtUx|W~MZu|};CxYVp(wam6kI9_E)xY;ih{q0g8N0mqoUwh zQBW)jrbtA=G*K{96wDC?AB%!dM8OZDV5cb9EeiIEf+|sPKonGq!q%d&vncE;3cHEI zo}w^I6!sN``J!;JC@c_#MWXO{QFyv2JW~{&CkihVg%^v$%S7RoqVSr8D7;-1-YW_p z7lluV!e>O`^P=!YQTUoDd_xqzEec18!pWj=o+z9z3KxjNg`#k|DEw9welH5kMBy4y zxK0#q6NNiOVYw)*6ovamVUsBQNsKr~jA$)JB-)7)?Zt>+h!I`Hh%7N8M~o;EBaRaz zekDenEJmCvMw}@|oFhh@FGgG^M%*GsJSaxIE=EiiBR&-)J{Kdt5F@@8Bfb?Qc8Q{P zqNuAVI#v|rilPEhR3wUy7m1=1MbWQC(Z!@>|k#oezFU81j#mKV% WyVXL=7XMFoTmJv_|CboK`u_mHGVvb( diff --git a/main.cpp b/main.cpp index c03f08025e..f366efad3a 100644 --- a/main.cpp +++ b/main.cpp @@ -51,8 +51,6 @@ using namespace std; -// Junk for talking to the Serial Port -int serial_on = 0; // Is serial connection on/off? System will try int audio_on = 0; // Whether to turn on the audio support int simulate_on = 1; @@ -93,7 +91,7 @@ ParticleSystem balls(0, ); -Cloud cloud(200000, // Particles +Cloud cloud(100000, // Particles box, // Bounding Box false // Wrap ); @@ -103,7 +101,7 @@ float cubes_scale[MAX_CUBES]; float cubes_color[MAX_CUBES*3]; int cube_count = 0; -#define RENDER_FRAME_MSECS 10 +#define RENDER_FRAME_MSECS 5 #define SLEEP 0 float yaw =0.f; // The yaw, pitch for the avatar head @@ -146,19 +144,15 @@ int mouse_pressed = 0; // true if mouse has been pressed (clear when finishe int speed; // -// Serial I/O channel mapping: -// -// 0 Head Gyro Pitch -// 1 Head Gyro Yaw -// 2 Head Accelerometer X -// 3 Head Accelerometer Z -// 4 Hand Accelerometer X -// 5 Hand Accelerometer Y -// 6 Hand Accelerometer Z +// Serial USB Variables // +int serial_on = 0; +int latency_display = 1; int adc_channels[NUM_CHANNELS]; float avg_adc_channels[NUM_CHANNELS]; +int sensor_samples = 0; +int sensor_LED = 0; glm::vec3 
gravity; int first_measurement = 1; int samplecount = 0; @@ -214,9 +208,14 @@ void display_stats(void) drawtext(10, 15, 0.10, 0, 1.0, 0, legend); char stats[200]; - sprintf(stats, "FPS = %3.0f, Ping = %4.1f Packets/Sec = %d, Bytes/sec = %d", + sprintf(stats, "FPS = %3.0f, Ping = %4.1f Pkts/s = %d, Bytes/s = %d", FPS, ping_msecs, packets_per_second, bytes_per_second); drawtext(10, 30, 0.10, 0, 1.0, 0, stats); + if (serial_on) { + sprintf(stats, "ADC samples = %d, LED = %d", + sensor_samples, sensor_LED); + drawtext(500, 30, 0.10, 0, 1.0, 0, stats); + } /* char adc[200]; @@ -296,7 +295,7 @@ void init(void) cube_count = index; // Recursive build - /* + float location[] = {0,0,0}; float scale = 10.0; int j = 0; @@ -316,11 +315,11 @@ void init(void) // Call readsensors for a while to get stable initial values on sensors printf( "Stabilizing sensors... " ); gettimeofday(&timer_start, NULL); - read_sensors(1, &avg_adc_channels[0], &adc_channels[0]); + read_sensors(1, &avg_adc_channels[0], &adc_channels[0], &sensor_samples, &sensor_LED); int done = 0; while (!done) { - read_sensors(0, &avg_adc_channels[0], &adc_channels[0]); + read_sensors(0, &avg_adc_channels[0], &adc_channels[0], &sensor_samples, &sensor_LED); gettimeofday(&timer_end, NULL); if (diffclock(timer_start,timer_end) > 1000) done = 1; } @@ -367,7 +366,7 @@ void reset_sensors() myHead.reset(); myHand.reset(); - if (serial_on) read_sensors(1, &avg_adc_channels[0], &adc_channels[0]); + if (serial_on) read_sensors(1, &avg_adc_channels[0], &adc_channels[0], &sensor_samples, &sensor_LED); } void update_pos(float frametime) @@ -493,8 +492,6 @@ void update_pos(float frametime) void display(void) { - - glEnable (GL_DEPTH_TEST); glEnable(GL_LIGHTING); glEnable(GL_LINE_SMOOTH); @@ -627,6 +624,17 @@ void display(void) disp_x += GAP; } + // Display Serial latency block + if (latency_display && sensor_LED) { + glColor3f(1,0,0); + glBegin(GL_QUADS); { + glVertex2f(WIDTH - 100, HEIGHT - 100); + glVertex2f(WIDTH, HEIGHT - 
100); + glVertex2f(WIDTH, HEIGHT); + glVertex2f(WIDTH - 100, HEIGHT); + } + glEnd(); + } } if (stats_on) display_stats(); @@ -774,7 +782,8 @@ void idle(void) // Read network packets read_network(); // Read serial data - if (serial_on) samplecount += read_sensors(0, &avg_adc_channels[0], &adc_channels[0]); + if (serial_on) samplecount += read_sensors(0, &avg_adc_channels[0], &adc_channels[0], + &sensor_samples, &sensor_LED); if (SLEEP) { From 0fdcf1a56603a5a65ec78e35d9bbfc9bb2e0a721 Mon Sep 17 00:00:00 2001 From: Yoz Grahame Date: Fri, 30 Nov 2012 09:59:33 -0800 Subject: [PATCH 024/136] Increase colour value gamut --- cloud.cpp | 2 +- field.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud.cpp b/cloud.cpp index 329b840ab5..f80953cb50 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -10,7 +10,7 @@ #include "cloud.h" #include "util.h" -#define COLOR_MIN 0.3f // minimum R/G/B value at 0,0,0 - also needs setting in field.cpp +#define COLOR_MIN 0.2f // minimum R/G/B value at 0,0,0 - also needs setting in field.cpp Cloud::Cloud(int num, glm::vec3 box, diff --git a/field.cpp b/field.cpp index 011fb6515f..de5bd2cc06 100644 --- a/field.cpp +++ b/field.cpp @@ -10,7 +10,7 @@ #include "glm/glm.hpp" #define FIELD_SCALE 0.00050 #define COLOR_DRIFT_RATE 0.001f // per-frame drift of particle color towards field element color -#define COLOR_MIN 0.3f // minimum R/G/B value at 0,0,0 - also needs setting in cloud.cpp +#define COLOR_MIN 0.2f // minimum R/G/B value at 0,0,0 - also needs setting in cloud.cpp // A vector-valued field over an array of elements arranged as a 3D lattice From 4f65339daf2a2129119e721aadd998a8868d3e49 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Mon, 3 Dec 2012 16:06:18 -0800 Subject: [PATCH 025/136] Made particles colored simply by their velocity --- .DS_Store | Bin 6148 -> 6148 bytes cloud.cpp | 4 + field.cpp | 85 ++++++++++++++---- field.h | 3 + hardware/head_hand/head_hand.pde | 2 +- .../UserInterfaceState.xcuserstate | Bin 
104561 -> 104649 bytes .../xcdebugger/Breakpoints.xcbkptlist | 6 +- main.cpp | 2 +- 8 files changed, 78 insertions(+), 24 deletions(-) diff --git a/.DS_Store b/.DS_Store index 8a70b493dd03a6d3d3fde862d0b4df0112a17ed6..42a5f685815aa20bee6f01dd526b6a8fb260283c 100644 GIT binary patch delta 35 rcmZoMXffE(%F4{Fb#<~Is}^5wzKcsrX-P5z!NXrbrAvp)a?wA delta 35 rcmZoMXffE(%F4{N@a|+kRxQ5Vd>5CL(voBbhBF6xb2cAfbrAvp-){`7 diff --git a/cloud.cpp b/cloud.cpp index 155540bc01..3a73134441 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -89,6 +89,10 @@ void Cloud::simulate (float deltaTime) { const float FIELD_COUPLE = 0.005; //0.0000001; field_interact(deltaTime, &particles[i].position, &particles[i].velocity, &particles[i].color, FIELD_COUPLE); + // Update color to velocity + particles[i].color = glm::normalize(particles[i].velocity); + + // Bounce or Wrap if (wrapBounds) { // wrap around bounds diff --git a/field.cpp b/field.cpp index f450c38525..aa3b496fb7 100644 --- a/field.cpp +++ b/field.cpp @@ -12,6 +12,8 @@ #define COLOR_DRIFT_RATE 0.001f // per-frame drift of particle color towards field element color #define COLOR_MIN 0.3f // minimum R/G/B value at 0,0,0 - also needs setting in cloud.cpp +#define USE_SCALAR 0 + // A vector-valued field over an array of elements arranged as a 3D lattice int field_value(float *value, float *pos) @@ -35,11 +37,21 @@ void field_init() // Initializes the field to some random values { int i; + float fx, fy, fz; for (i = 0; i < FIELD_ELEMENTS; i++) { field[i].val.x = (randFloat() - 0.5)*FIELD_SCALE; field[i].val.y = (randFloat() - 0.5)*FIELD_SCALE; field[i].val.z = (randFloat() - 0.5)*FIELD_SCALE; + field[i].scalar = 0; + // Record center point for this field cell + fx = (int)(i % 10); + fy = (int)(i%100 / 10); + fz = (int)(i / 100); + field[i].center.x = fx + 0.5; + field[i].center.y = fy + 0.5; + field[i].center.z = fz + 0.5; + // and set up the RGB values for each field element. 
float color_mult = 1 - COLOR_MIN; fieldcolors[i].rgb = glm::vec3(((i%10)*(color_mult/10.0f)) + COLOR_MIN, @@ -68,15 +80,27 @@ void field_interact(float dt, glm::vec3 * pos, glm::vec3 * vel, glm::vec3 * colo (int)(pos->y/WORLD_SIZE*10.0)*10 + (int)(pos->z/WORLD_SIZE*10.0)*100; if ((index >= 0) && (index < FIELD_ELEMENTS)) { - // Add velocity to particle from field - *vel += field[index].val*dt; - // Add back to field from particle velocity - glm::vec3 temp = *vel*dt; + // + // Vector Coupling with particle velocity + // + *vel += field[index].val*dt; // Particle influenced by field + + glm::vec3 temp = *vel*dt; // Field influenced by particle temp *= coupling; field[index].val += temp; + // + // Scalar coupling: Damp particle as function of local density + // + + if (USE_SCALAR) { + //*vel *= (1.f + field[index].scalar*0.01*dt); + const float SCALAR_PARTICLE_ADD = 1.0; + field[index].scalar += SCALAR_PARTICLE_ADD*dt; + } + // add a fraction of the field color to the particle color - *color = (*color * (1 - COLOR_DRIFT_RATE)) + (fieldcolors[index].rgb * COLOR_DRIFT_RATE); + //*color = (*color * (1 - COLOR_DRIFT_RATE)) + (fieldcolors[index].rgb * COLOR_DRIFT_RATE); } } @@ -106,9 +130,10 @@ void field_avg_neighbors(int index, glm::vec3 * result) { } void field_simulate(float dt) { - glm::vec3 neighbors, add; - float size; - for (int i = 0; i < FIELD_ELEMENTS; i++) + glm::vec3 neighbors, add, diff; + float size, distance; + int i, j; + for (i = 0; i < FIELD_ELEMENTS; i++) { if (0) { //(randFloat() > 0.01) { field_avg_neighbors(i, &neighbors); @@ -117,9 +142,6 @@ void field_simulate(float dt) { field[i].val.z*field[i].val.z, 0.5); neighbors *= 0.0001; - // not currently in use - // glm::vec3 test = glm::normalize(glm::vec3(0,0,0)); - field[i].val = glm::normalize(field[i].val); field[i].val *= size * 0.99; add = glm::normalize(neighbors); @@ -128,12 +150,27 @@ void field_simulate(float dt) { } else { const float CONSTANT_DAMPING = 0.5; + const float 
CONSTANT_SCALAR_DAMPING = 2.5; field[i].val *= (1.f - CONSTANT_DAMPING*dt); - //field[i].val.x += (randFloat() - 0.5)*0.01*FIELD_SCALE; - //field[i].val.y += (randFloat() - 0.5)*0.01*FIELD_SCALE; - //field[i].val.z += (randFloat() - 0.5)*0.01*FIELD_SCALE; + field[i].scalar *= (1.f - CONSTANT_SCALAR_DAMPING*dt); } + if (USE_SCALAR) { + // + // Compute a field value from sum of all other field values (electrostatics, etc) + // + field[i].fld.x = field[i].fld.y = field[i].fld.z = 0; + for (j = 0; j < FIELD_ELEMENTS; j++) + { + if (i != j) { + // Compute vector field from scalar densities + diff = field[j].center - field[i].center; + distance = glm::length(diff); + diff = glm::normalize(diff); + field[i].fld += diff*field[j].scalar*(1/distance); + } + } + } } } @@ -145,19 +182,29 @@ void field_render() float scale_view = 0.1; glDisable(GL_LIGHTING); - glColor3f(0, 1, 0); glBegin(GL_LINES); for (i = 0; i < FIELD_ELEMENTS; i++) { - fx = (int)(i % 10); - fy = (int)(i%100 / 10); - fz = (int)(i / 100); + fx = field[i].center.x; + fy = field[i].center.y; + fz = field[i].center.z; + glColor3f(0, 1, 0); glVertex3f(fx, fy, fz); glVertex3f(fx + field[i].val.x*scale_view, fy + field[i].val.y*scale_view, fz + field[i].val.z*scale_view); - + if (USE_SCALAR) { + glColor3f(1, 0, 0); + glVertex3f(fx, fy, fz); + glVertex3f(fx, fy+field[i].scalar*0.01, fz); + glColor3f(1, 1, 0); + glVertex3f(fx, fy, fz); + glVertex3f(fx + field[i].fld.x*0.0001, + fy + field[i].fld.y*0.0001, + fz + field[i].fld.z*0.0001); + } + } glEnd(); diff --git a/field.h b/field.h index 354a4a8149..6fbdf262db 100644 --- a/field.h +++ b/field.h @@ -24,6 +24,9 @@ const int FIELD_ELEMENTS = 1000; struct { glm::vec3 val; + glm::vec3 center; + glm::vec3 fld; + float scalar; } field[FIELD_ELEMENTS]; // Pre-calculated RGB values for each field element diff --git a/hardware/head_hand/head_hand.pde b/hardware/head_hand/head_hand.pde index 16be76ed04..8618674cc3 100644 --- a/hardware/head_hand/head_hand.pde +++ 
b/hardware/head_hand/head_hand.pde @@ -9,7 +9,7 @@ Read a set of analog input lines and echo their readings over the serial port wi #define NUM_CHANNELS 6 -#define MSECS_PER_SAMPLE 10 +#define MSECS_PER_SAMPLE 15 #define LED_PIN 12 diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 5069cead6418bad2f51d22121b689a2636dda6fb..752de4bf0043e4b1c6967cc4d4c515c7947f5a6f 100644 GIT binary patch delta 40972 zcmZ@<34Bb~7yjPly@zC$x4)NtPcSpt6CsfVi6yoWI~6IRT0}L9T4rvwMN#x(Xce)m z7PYptmM*%fwW@SMRjZ29h3@{pH-l;W`?JKnvwY_}=iYPgyYG$boB8)&;pb{0FB=$rhqA8S}~Wq;Z}+dj`e-@eHHynUH{g?){Ez5QkTM*9}~cKd7g*X{f4Z`t3qzh^&aKWsm4 z|HOXU{)PRV{VV%7_V4Uh?AJW@pY1p8f7t(aSR98jzx~ej^`ZDJC-vdqhpg}v*Q)V8;(5=&tAtq$3e$Ojzf;a zjw6nvj$@AFjx&z4j&qJHj;oFz9M>Gz9X~pLa{MenU<8X`7aW2hh(fdwBg6`Ef=h4< zX+pYCAQTEMg(9I`=qPj&`Uri6e!@^;nDBrwTo@}nDvT4x3loILgo(l=VY)Cwcv_e# z_=H6sVX^R>utr!btP|D?FA5ukmxPyv*MwcdZsB#|4PlS4SJ)>U6h0CT38#h6gwKU9 zgztn)!uP^u;fioo_(AwX_*3{x_*)DW!$hlS6YZiy6vQYoT8t56MW^Tz-C~;9LM#vq z#g<|Rv0UsZ_7?kyeZ_uaf6+5QyiXh`J}5pUju6L-6U4{FiQ+VIx;R68TAV4)5@(Cg zh|h`7i%Y~8#P#Bf;s$Z2_^SAtxJx`BzAL^bzAv5-KM_xgpNe0L7sPMGZ^fU)pT%Fq z83$PMI%awFLx3$h}I%Mo&<94{xxDY8@cxMa87 zS?(rxmwU;*<^J*jd5~N!*T_TV2jz$4T6wfwCy$jU$dAcWWRE;menOru&yZ)!&&bcp zR9-AUC%+)Clvl~?j_d`LbjpOin7&&glQm*lJR&+<+AFNIMWD4~j= zC`yD9t0XF^irb@PD$SIZN*kq}(pjlcdMUk?`;@`T5aj`7l=84LPMN3>WtuWwnW4;6 zo>izaUs<3mQpkMyv5^f|{bHs_ANmnyF@~*=m7WsJ2wg z)V69nwY^%Qc2m2n{nY;I0QEk#MjfgSQy)-As}HMVRF67UeM)^=ovF@I=c@D71?pn; zId!SJOx>z(Q@5)-)Sc?9>TBvQb+>w0J)#~}kEzGikJS_EC+bO$`l)(Zy`o-Ke^9Tf z*VP}@pVXh#KQ&$xG*Q#FNG(c>(-O5LEmdo*xwLdGTWh8jY9(5!)?VwPb=7)my|h7E zwRXQYSR1BI&>qt!YLm3d+7!*Bc{QR<(`IXPHL5MteA@HcT5X-SUVBm7puMDR*0yP$ z?b=(~+uA$Y0qtGwJ?)ToSUaJ8qJ6HN)4tO#Y2Rx~~ zjGm}Fb(fx_=jt8wa=oM8N$;$8(YxvudN+NrK18q4hw8)h2lV0kgZfB)wEnn0RewT% zQh!RHrcc*r=uhjj^?4qh>WlQn`X+s|{))au->PrZx9dCfo%*Z#Yx?W@KK(8IUHwD- 
zxc;$zLjOcRsh`%r(7)9$>fb3b`X&8_epCNd|4skhfWa7?!5daXFcOSJBgsfMQjAoi zvC+h6YGfFhMl&PdC^AZoGNYr>&FF6oFzz=}hZ>`ehmA4DBgO<{vN6SY!g$(v)}Y3G zV~O#CvD8>&tTomd>#5Jyia9_}*%pRZ67C_wjUwD5gqugWX9@QjVI72ZY@KfJ#xz() z*xv{jx%I4LdqbPxt?+j9R(dPFJ-ijPTX-@Z)Ob^*z#Ky(gke7u>n*5f;=J9>>!BFN zBvNr;RIz|N`c!Yhi%est3DcBuGA_o=q%rAC29xRS>Fwq1?d{|3>+R?5?;YU1&pYrA zMh=~~GTy>8qvF6s3)6yjOH8&fE$Ni(3=302=L}4-Fm0%JaFT^-OP?ugXJI0Rnw=3VZs_pb1+^se%*zD?BT(>V#%Q6lblR8C6UovKT7HqVj#t`z)xurBX}#l@B1lz;H{vU;R$r; z1Fvn>nvBb7%Lsf!(~1u>i4$g}X-1e;-iq67c}I{rFL?`I-lm;%dxDBIp}Si*lNe{u{-YDy6HF?ArxWILZ^mK^A@fQeIoS9(5HyyBVsv3 zEQeQyP7j?C`ZTc|A(o@Wa*Q%dtGhqzt*Q^D#Bx07yfE|yZ{eCyU+ALH#i7rIJ|DV- zSUx6}6U6cfv7989Pl@H!n$V@8%L3dPfmlwHkiev_^j6T19%xKQCv3HazT_>~BGK3l%d{En(Yf{n}jSRXRG;Z3){=mu0#vVSDJ2hqEkU`ze!^LWi!4qA#wqG4IlD zSxqfrA5ia@I7`?^{uvQ=gu2&fF~{kk^;yg(H2sleCX7zb&Sb*qrtD-(*crO}5_aL)y2eZx?UrNyd@RRl3A;)?b%{(E{V*rXG|F)?Vbq!1)DrePO&^=hgwdI~ zag3F|l-t~54WWC-IxSY7R=nh*C3%S!tCiNjl+8Hk!;i)@R=PXS{P{&*lErG!;+M0S zNNOLKz*uSDW=@MWo(>%6wpf$s^_QLWY_lYbwJH5_Try*&uKYB{O8e)hSghG}??xxx zn4iU1>A8G2W2KJfO&eH?z2Q}YlZvlsG;KnZ#oC%~-jv3a(KDNxGVT2fk+maTwmFh^ z-W*3WTVz7IgYi*-0n z-;&FWpvD*$T zHYyIYSQpc)Q!*{qB{bgSqEolW(@`Fq#kzu?+wP=CiqkCCb@ck2SSqwq>F6C1v`a}( zi*+MCx1%GyR?^F2-R56)tvhLTd($X}9%&U}vF@VROLAyj>v)UxjoTWPc6~h2V%>iMzH?$qd!UD5QF()JeXr!Lt3J=IpWSg-g2GNy&ozn>B<)@xLp)`qdt$?YmE z)*Jq%&f8w48RZ;3J597$f1^{%n^5nbC_1QpAB*)Cozl)tg&8dUzI_b+c~3Si>=0wI zakRRe*SeqH7Fe zqqSXoSZt+~IZ(;8qu1wjw%E#P$=p7sX+=kit%Ah8AKs_2w< z0lX{Sqb;`k{TVOs5=!ST4x=TNs>N1A(`k{#_5iJ(pGDuVjJ4PvqJO`gLt}cRSZt%{ zpw|QL74sby+rzYaem9G)j$YquN)_tpDOqgeXw`>ZnaAj+5Bo8bX?$l>Wv9yyHlRfZ zvl%b7FT9_QKFHGr`y&IIg|QafQ^rJkwr?+9T&7tIQ z4z>1CsB|cgF7Lz9{(T%4+k!yw!x41yBCEx=D3H9$4nZBJYwpDa=SF@lFT5%+bX7uZ2vAswKJy&kAy-bCpZJEtWl8~23$VL*fnS^X1A=^mE4ifSz3E4$LUMC@YNXR}C@+Jv+n}i%7A@7lp z4@k&C5^{)y93dgcNXW+|fBI|;j+uzLyn7Gd8d?1zLsOj@wV344;T zpAq&9Vb2rxTf$x<>{Y`4NZ4P@A`e0}?zcC^5N`wtsrUMC)`w!4J&pc#yq%F5kgp8N zbIn$0L-&8&f-d}6mh%G!tAhrG^!1Oc@DsQ;&kiv9T?Dq$BuLN~N 
zY4zvDH07jd3=inG26ZEYWG0?e<wFPQjgf=mK}-9dyW{Roeq5fuCMfbxx?au&UQx|2L7Am1C5Qy)>7){1>$K(jxn zdCt^i(!$Rr8ITa=E3Ftlw>fWT)pI0<|Cm=se_npe5 z3r;1<9|QzPgNBFb^)IvNjn5s%(SYuFPf zlqc!FQ$38Y0~e=)mKX0r`5I}AsMx;`n0yvA`N0<*!DP`xUkdV%0p%A#r+`FLaS_uK>4#@{#n}3ma}XKFb8d+!+e z$RBMMw;V7rD?2ya6_?p8FK=?+iP^huu&K9mh`kh@ ze%^7>^!eIx!SRjbTf#OWY*WHI3F}(r_|9<&ZpUT9x(S=+_enST&uvLT|MA+(8!U|y}+`?+Y-cQ)Uey1V+G>-k;I78SZY%?7<3$F-U zgsp@fO4wn9eSomTR|(sN9dHY;681sDKIC^C;dfm9OG<|Drr_`M|F<3q?+Ip~uP5v% z!q)m74La&dRQeIAHSL^uhzaEh=G6LyT>=MhT2Pfb4~d~FguE1VO)63!ELEMXrd z>^Q=XUnN`+z6r#cK-kA_$1w*e=W2BNb>W5?>;E?0P2pF#h2IG~iLjIXep9IBr{r{q zoVRe5$cPp(L}Ur;B`hKAy;TD?`b|ztG`F&>77k^4g&k%F(87EI{2DjLpuyY7I z*Y7ir-ukJDr&uh#r%##KHjor0?EFAdiLG|@`u1D<*(rgAEGH+pkcpke?q+?uh+V}B zu^V9*64po9MTA|vN~{!nz%BM7>~n;D-Ve2;N-!$M&@~qyrH!toWQc>rntKu%Dh>+} zSxVSt0V4E|Ut-foiI3dVr%oITw>XZl^@LsF_gP6h+(=2EEIw|soFaNeuSf{Hny_mK zyOyx)R*6%^C*T&JBJ6s?zUcSa;GZ$4uO(-QbHxSs47O18!7VN(?8}7R=y%*if4tE+ zeVMrGo<6I^HG!mFA?%jhNp-jxmHx80<(@uU#cgnlI|#dtu-pAUJLvqI$?30)`|s)V zruY`z;yZ+Wm9T*qHoNG>n~l>y6pxrb|E~qaqvA2R#g7U5I$_`N$Js-Re|4sx7SG%> z&ROvs+~Rq{?j!7ezt5X=@vlwOzZ0+C)8_~A8rw$wrvEDbbx&@8 zi?`sG7{b0s*!TTDAJ9&}HA!zES??7`vPpKhC4sO93Hy=X=MY`>Ta$E6io91GDN2fl zTZ$#@5yBqz`y8Wx{+61aEH%BSk5h8NEu|6mW5S;B`+P#X|L#oBk(%F=TMMZGZmA_< zKPBubzt3s<((fsrHd1@j=l|^?QU|FVZmAPtKPT)LexEPRJ*4i#7ek``otc}HV{W@y zx&PWer0!Dhdvfn1^@UsNPuR1BJr~$^zoI?=NKLPnhTfC>FzErfr3VT7HDNFKiF`vh z{LwUhv^4gfK95S{0u%otVZZbH7?k}p-ZMqI=cXw=Aw3DVG>x#|6ZW#-=Zcx|*mokj zEEzB{vsp$nS6pU(c60N_FeA6wzq6h#-E-5Fs5BpLX(3^MXhhg+e&p-)LW)@N^@cq~fg(@@c=E z3!ZMIFX_lz-KqL_3B7fT7tcxOy@mDCSA+|xm%b((>$}b{iN(@IZ&khY9pSi-aS4rZ zS^8eOEM1YVBBe^uI+T`m8#{dPL!D}CAF8MxR?}td$op%?5Uv5?8WN88R?^yAvwWjN zm`06`;Xynk{UrUoU^5fTt(1O|ZY8=^!rA@kHg6>z{r3~Tj~GT4!(^McuwJ$jPNq5BhgzH1N{)8JqxIu)wpKyZ- z=RZjL=Z4%hFxJt&hgnAU6|hVi)5G^C%S8B|V41XreFN(Ffchg-J<#_I$3*xvj!A1c zIG|1rsH-?8jd{Qq$1@SWb*6e`K;0yuzHW-f1Vqk&s0GiYG2?uHm>yGkChhhEGA=!! 
zK4_|mZ*~JF!lyQ1(i%P$@Xj{Xim$2x<8C-JAj%DRtTnCX21LyQqH7J9G;x9KGskKn z;hNXWiwM`kKUT3zyj8)+$?`I}-ZWZHxWamQ1>suyjbhfA1G<)QMJI4cUN66BPHhZv zERkQ9H_APH+Zr+nN-^PD5ia&P8p*H7TjZ@wl)TM%sUg#(e~G~R=d-*+-r2uPWrtBi zYQ|So*FCU8eigO<6a=hx$-4v1(y4Y>Y3<;#BWp(0b*dd)T{paT)Jpkv`3=WY6)i6J++<&qfZL$gtvU)@rdpEH1DB*_RNqT|_{}|yO{4czj^jvR> z*^Ys%n+4IPDo>hRpCH_bdgUp?jr6^4V-g}~D$jTeS17ZT*@UYl+~^g`9Az%y9wyuv z-)}bN{$!u>ytiN$^g!2-P5bkl`Zw@A|)qBdp|7ZG_TlvB!SWux-B@`a!4bi&Ob+(g1X zO}LqBlrxN=oKwDH1V$j-EN=zjW)p6Xx58UdB_sqANS2xDp6}5}xvX3ts>l7!fhbjM#60&+zu1hzniOJft_!@7r_^CQX^Feh!^?}Dopp#z>5NIkr(vr ziLx*?BA|WF3p(YCbl(<*kwRkwvL#+n;p=!>5wH6;tBhnz3don50sL=U%vU7Vc=Ov# zR<()h^5(Blo2pL2Ehk+43e~Nq5pD(HR{G{@Oq0l5HUB>b&D9o!TTQq%zH=HA6PXYzA6bl2YYDfLaIYqoJTccl4HHYI zjrM=IJ;Z9Ax8Ox}tnGW-o3=yfsXnGoR41vE)hUGA<$oExhJ@_#rV{RT!tEj4KEl08 zxVMAj7I+QFnXdsG2EHMwK1nOSX%nUf-T`uNctLCZw~S?B>g>QE>@{Uyr{8|t7#gNN z8<6k!g5Ek_8m9UJ*Kc`2m!54MrY;FwzvBhXIFWwmr9^{){_mox_3De>!gcBjb)~vW zU9GNB*Q)E(^@MwuaPJZBeZqY}xDN?;kZ>On?$A1QgZh&CGTd;eE06>?;SLjiJ>kvg zDBcjlz3UYS>!=cnd}ni*a9>3flk6KG$?Wk}MKa-@yRD$Up&l^9?os!u`_%pFo9bKY z+v+=nJ3_dlggZvKJ_||{X3MF^{BqTQ}z8dBdP?ULupy%1GVG2R6ja=San_PnEN_7O3TWI46myl(`rmj zb(P=_aO@t<4EOyV#e{qM4jxfEc1YUocSM2uoL0}9L_bqMSHDodRL`hq)pP1sggZ^R z&j|N9;l3c;mxMb*xU+;iw@&?9yV^*;ZXV^nBizMc<^1cU7OlnjzDQ$YLsn?9zR%K_q;kJwg5Uc4UX`_D z#|*By1DE2LUg)$&klesY%lt3(*WB$W{+L%j!P}GRk%Cxq=FLIbvR%?e^^HWOX-N~+% zYn{A>^;$>5-K^I-6Yf{vbtjWqrghUQO?h|1{Z_B_Al&agI#&-Lb;r-D_0a}+^H*wp zwSHQE!u>(GKMD8OO6@+RXjO#!+niIHZl3tLn9(gu8XeThoSX4|@yOY7c4; zX(P0e+9<778%=l+o*}%2@F9d}3C|IpU#pGL9?|Nwv08*SP8(17283@&cn9HS!d)c1 zLU`5ZbTb<~|Cm3IYg5hngA~Hs3E$|>-)LLR;n+%e-8UhFiObh^YHxT8S8K0ouW7rq-P-Gf4<~#C;UftjMfm8|jG*n+ z_A!F?rniFdF@%pZ|4r~6&0xBT+6US}bDn%i_}F^$ThzYnOeRItj%vqE@iD^3*CWMS z>3cAfiSDd@s-5;0)`#{Xmc8}bXM|6@y<2HtdJ9%)XSB0~Pa?c|yzwt_R_(m@jW>Tq z=#bDFVtJET-ddr3t6d~~GT~EvA7?UgQQ8&l+J7vrYd;dcG2xr|ge)dzpmtOH^AqQ6Oq53#booCP zimnnqlkizpf}=xe*+19J#FCm4|6YA3f~H64QFkfn(S*;rLn+Rr6t5=`K9}%!@58Nn 
zvfelt&|_96beP_hSl%XlUSi2(asR@%>S=mr(EPS_w%BZ=_k{s)u}ui(uid`GVk*h|aCkFFjyq-IEmx|)%XR0*-Z z+Bin@Rm3r2zT-4euqxv{~ zygq^OoeAHC@LdUCLHKTj?@sv2wMc;*AL>(d4*T-%&+Dtb zh3oYt`V0C}eVM*ouh&=TEA>@`?@#yvgujpQ0|{S6_(6oPCj9+`AG{uJeXYJuH~-zB zzXZ4Lf3?I9A$$$thxu2n2MF(fh02fi%}QiWd-PrUZo&^;9^mzczGuK)R(?Wn=zCGy zvrA=VO`U%*^j-R!0ek=d%hBKQ7S!tp2!H#Sx?g|a6nsGV2k&%?RsTpo;>};7AJPvK z{vpDTSfL-)j}d+(;YaoBK72^`x(UHGy4$cjVNTr*qwmr`H!toWYWf-dyti3fb#LJ+{fd57|3SY-_;G|EPxuLhe{7Zh zBi#DW`Y(i^=&#Kb!h5O&r9)|1o7xd0sz*OkGsK*szC+1OOQ*RiRMw2B8C+L0#JtIXoTK1F~SJ%z2mJJcEj;h!MC=cFVJ>`UofykX_@cO6y~D_%0%; zLw%;Z>OtT8S)nBxh*BnUjwm}CxlQg_2^JbrhFFs2fI+3lswc*mdmjK&Vz_uH(9RO_a0k)3- z+hM?V6tH~)*ggepp8>WX0ed)LPXX+WeXry)>7Jf|eFR`11=vRe_A!8cEMOlG*e3$^ zDS({-_9p=QG{F8eV6O-4djR{Vfc*ks{}XVe1CCaJqZ)8b0vrng$IF0YBjDH#INksp z2LQ)=fa3$eaS(7E0-8Eb0*+IF<8#1q25@`@2vLAg1_;%FFcT0K0m5^>8F@^iXEz`m z1cXC?a0C#J0m8?Ca1szs1Hu=8a261*0K#v8$N*vpAaa1%01yR0i~z(aKpX*xV*$|% zh%*53c|cqNh#LTL8z8;`h;ISndw_Td5I+Mv;unB;1`y8y;w3=53`oU*)CrKf08#}Y zbqA#WfOH=qtp=nmfV2&eb^y|=fV3Zw-U4J1kYfNj4v-T7ISG*6fSeAn}GZqpjZHf1(cqEazCI90hFPD@&KSb3@DEP$__wz z3sBziU2n$dp3{JG0Z_gLlI6Vt2dJL{>d%1209pv3@qpG4 z(82)C259ks))dfOfR+hp*?^V{XsrRQ6wo>VT1Pu80No1cGN7je zdOo1H0Q8oCUJU3JfZh+#hrk1UG@y?G^g2L)6wt>5`ul)>4A9R2`Z++q2WPb!gm4TuLI#nf$-x%_z58VBoKZI2>%QS{{jd<1B9Og!p{TYW}K@)_%$G+A>fG+ zfe0Cha03x}Ktw(e(F2IM4~VD&BAx;w<^U1%fQXGi#7-dMH6Y?+AmR%k;tUWO4Me5@ zk&S`K;XvdVAhHgK+zLd#4n*z&B2NL4UjdQd0+H8%C;^Dl;Xzb0AgVJE)endo07Tsf zL=6O@9s;690#T0wQR9KA$AG9wK-3f zqDKMIqk-s0fatM6^mripF(CRGAle5+FZKY@OM&R+K=cYAdJ_=+3J|>mh<+7_-UUSO z1)}!@(Qg6KM}g?Cf#@qh^zT6QpFj))#DoAb91s%<#8`nCI}oD-F>ydl0uYl7#H0c- z`9MqyAf^zADFR~515XSEVwM0g>w%aJK+I+!W(yFr4TyOSh}jLqyaB|#1H^m;#C!|H z{0zk00Ag+eF~0(_91z<8h?Rg?1&GyvSObWS0Aizjwn9etWCF3(K?3R195YJ zxcNZbLLhDt5Vr=1dku(t6Nozu#2p3F;ywZ5J_X`V194}7xN|_@e_df$AS1cK>R!)ei0D=91y<*h+hiCuLk1R0`cpC_zgh(79jp4 z5dRYp{~M5C0TNgsp#hN42uRR?1jE;|B@@YH`uetHB0c3mLM@Om6G+$sBzy`a{0$_= z0EwwUViO>-DUg^AB<2E%ZGpu0Kw>$N*a=AN0wh)diQR$39zbF*Ah8NatOgP%1Bp)o 
ziPM3^O+ezSK;kYS@g$IV7D)UGC{O$gNaBH{hCoslkW>I9wFHtT14&N-Nz;L(^+3`V zAZZ(rbQnnb97y^SNIC-~oduGv07*aiS{5<7hXs-yK(YuV%RsUUB zR{_b#fs_y+B^F3=0V!!fN+yt!4W#4&DfvK3Cm^LKkkT7S833dV1X2b8DI#_ zft25Yls|!#TRU1FWX&`kLkopXex&U2~ z>H|_21F6pgsVjlh_kh$RK zD8LyDI1>P8GT>|sIGunq4RB@x&K$tm3~;spoGk%o3E*r4INJiw4uG>0;Oq)Gy93Ui zfU^(a><>5x0?ul{IRtPH1Dp>6&XIs~G~j#$a6SrnoD%@&B*5tboR0&}CjsYlz&R6e zJ_9)C0nYh=(+4=81Dr1a&gFn}CE#2GIM)NtmjLG`z_|r*ZU>yN0?yrla}VI$4>;cj zobLk84*=&!fb$68JPtTNfd|f0fb(;}c?NKP1voDN&WnKad%$@Wa9#(TKLgI2fb)02 z`4`{<;0gg;Jm6{sxU7K70k|Z076Yy&fNL4xS^>CL1Fm&| zYXjig2)JGWT-yNGPQbMbaJ>Pz_5rT<0oR9s>k8ny0l0nz+ydYZ2i%c>I~Q;l0qzpO zJrr=)0`7+a_Z+~z2yj0Kc-%Vx_g=ugA8>yGxW5717lAYZNDBwjB7w9*Agv5YYX_v& z0BNIuw9!D?^FZ1vAZ-nhb_7T}1*Cljq{jg1sX%%YAiXD$J`hMB1f(wj(w741%YpQ_ zfb@g#ApH=KegjCq1!OQlMlz6*24rLa8C5{W13<=uK*l^E<2fK>36QY^$k+>H><2Q= z0vX={8Q%k$p+Ke#WU4@BHjr5eWEKIL!-32(KxQ3~xd_Ot2QpUznFl;T=3yZ7D3JL( zkQD-CaX?lAkmUrj+(1@2kW~p}^#rmW1+qLq76Gyr0a^7x)=D62FOc;fko5tO^&^n= z2axp_kev)Z&*MP$dLVlXki8AaJ`7}k3S^%KvVR3~ z7$7GE$cYDXngTg4Ag3dc(*wxq1?1ELIg^1L50JAQ$XO5MYyfiJ0dfujIY)q;Ux8c( z$PEE$WF^ z$TNVv7C>GbAg>I_8wlhL1M-FgdCvlQ&jWcc0D0SiygfkPJ|OQTkarfy`wGbW6KKW( z%^JXiX6Zn)e4tqipjjo*Yyi+~AkfSMG@A}Idm3oA5oop(X!aV=>@?8qJkaa{(CikF z-w?=@{ z3<3&<0|gHO1y2D5bAW<*K*6g(!G56NEui2sQ1A;-a1$s@01BNzp&Kad3>5YP3i|+s zj{}7>fx_89;dY>K4^X%dD7*j^UIjdb*MOE9&@vWi84t8<1GMY}wCn=3oDH;G2((-T zv^)v4JPWk^3MjGyMJiCF14Ye&qSioBDNr;ND5?dD9tMhL0YwXdA|Fun3Q)8QD0&?z zIu8_G28yl%#SVB-Yyia(KyhoJxFb;987LkN6i)z(Cj!Mwf#S75@p_>61EBagP<#R? 
z{tYOx03|F?(jF-329#6+B};*lwLr;wpyU!z@)J<<3(%?+(5f70)d^@d3uv_fXyx+& ztu6qqt^%#D0j&#x)@4BJc0lU|K*YY}?}64o1FdfWZCpT`T%b)epv|K|8xPQi z0Bzm^+8hMh90E!k0i_a9ssN>ZfYNH9bTCl50Vv%DlLPfwtFx zwm$;xvVe94K)aSeyKz7}FVOCBpxt{wyQ4t6o_FIAW zuLJG(03BGMgAM5506O#pIt&Cl3<5fA1Ul>lI=lvS_yy?jH&70syg5+b8YnLX%Etla zUZDJOp!^M>`~Xn?9?&ri=%~PhjvCOh3(&DQ(6KMjaS70IHPCS_(D7TK<29h;k3gp^ zpi=?RsU^_q5uno~pwkqf)4M>YBS5EPK<5acb0W|=8R+~t(0L}%c{b4bW1#aFK<6_+ zmsFrj2GAu7=<=8c=rR@P@+8n@H_+v6pvwWEs}6LH1G**vU55i*#{gaHfUbLhuI~a} z-v=rrpdu2ehz2UEfrvM*5CAE=xQR4xW8 zp9d<>1C^J7%Bw(+VxUKRphr2-<8h$JOrXbXpvMWI$Cp5lvp~-%z|%7s=$Q)i90Bwk z3-lZZ^n4fSc?9Tr4Cs{(^vVZ%wE%iipw|+h*HWO@IiS}ipx0%fcP`Mo2tMikAnM*&Vtyw0Q?MumI5dx5$wH}YuC?q?cM8DuNnd&U_jo`dwo%?Gz$nS zMc#n)-V_lOM6jWd(2?Fr0)zz8tUONqvaa zhe>^u)W=DEiqw}$eU;QyQk#?7lGN6ujwE$1smnwtwys1nx)XJ3C%8|*(Ef)jAmEU>{^=LK(jk&_9D$v zX_iK_begrISvQ*Xq*-s8^*c*Qv)^g9fo6MXwvT2UqO0%_T#O(JayY12rXPufD# z7Lm4`v>!>^NZJq*apk56v6X{34oPLi2lR{s_$< zBicNJ<`J5=r};>lkE8ilH24QlBnDkFc|BUp}q<=~JIMOGRK9%%Yq|YUN0qH-IzLNAcr2k6#@1(CIJ%{v-r0*m> zkMx2l=_g4qCjB(&6{J_u5-sb{@(NnsNXwgPc`GgNpylJVe2SLO(DHd&zDUa$EvM7+ z8(J=*<&U&nNy}BVTtmx0X}O-38)&(imX%~wBjdaa$+(J)Ysk2sjGM^#KQitg{nqoL zY4m?&v?e2yj5o=MkkOuuPGoc;qc0i#$rwP!`(%7b#&|MfWK1Gs3K`SMm_^21G8T}r zn2aT4d{4&DWUL`$7a51iI7+KHS|!n{9?@3U(dsr@-9f7~T4mCzEv??A)o@yUOsg5R zT1czKv?``m6|K(D`axPhN$aO+J($)bX#E+j=hAu^t$(2PkF;Jv>py9|j@H|0y_42^ zXuV%l>qE3YO6ya!E~Rw^t^eVTz#DPAaT{;6;EmzDv5Yqg$gD$VV=`07Y)a;ZWL`q% z#O+DJ2OPf}-$)Zhf+VrK(+q8Ly zHt*9WI+!+}&}JlUM$_g?+QewHnl^iBlTVvdv?-xYIc=(F8&BIr+MY+-^J#k#Z7-$m z6|}vYw%5`2F52Eh+xuwy0Bs+l?ZdQvjJ6TlzE9iPMB8qlZ82{);myl<^9tU)k~eSS z&0A=?Lx5$YCORM2-t?cUCF>5d?k4O1 z$a;XRf0OknSx=DlG+ED)^&hfcBI|!-y-rpdS?Of8A}f=uH_6H(t0P%m$m&j3FHu>2 z$r?b`TV%aM*85})CTkd3AConLtWjirLDpEZz9K7|tgp$MO4barW|K9Ktc7HKL)Ld> zEhFnkvVJ0KHCexs^#@shk(EQ%MzXd<$=XiVPO|opwV$j*WE~}|fUFZ_6_ItCtTNhH zqkSChucZA=w7-S+PtyKH+P_5m4z%w{``)zwiuO}!Kb`i!(0)Dbb7_B^_NBBhr$e+c z9WJ87C3JX_4lmN-B|3Dc!vH!Aq{DbROrgUxI^@#fZ#wLx<0W*wmX6oc@l`suq+=^O 
zeon_29VgOpH68z=<9a%l(Wx4p;^=fWoo*r8=~g;r&?!Qv_H-Iar*U-picU-Dw31G% z=(LYc`E)u)=Oj8ergI9N@1XMobbg4=S#<7B=bm&PL+43!o=oRo>6}C74Ro%eOHI1e zrppzgy4*;Yo9Xf_U0$Kft90p2m$&Ki4qe93WfEN`(`6-Hey7V?x}2iRKXgUctLb_R zU2mo9^K^ZcuCLSeExHb->rlE*q3c|_&Zp}-x^AWG_9$J;=vIwxadf+kZr9W8M!G#i zx0mVmKf3jz+go%SM7MEtn@qQR3}NA&!Jp7ZIs zl%C7zxtX53>A9Dl|In)zz3R~G2728=ue<2=61~#s)q-9F>GdJKhR|yPy}qZ{a(eBd z*HL=q(>sygjp&_B??>tVoT%O}(7PMG`_ns0@2}`RmEP0oy@uZF=$%8K>hwvZPhI-l zMxXoW^8kH1(Wf_k`qC#xpXv0ONuR&yvxPp}=v$q>iS(^Y-@EAh5Pct}Z}TX9+tBw- z`hHB`FX%glzN_fFmcD<{x14^}=~sh(SJ3Z9`rS;w*Xh@aes9ok82v`k?{oSsqu*-! z{X)NE^edrX8U35m|8n|YN&h$K-+}(oPV}Ec|8MF49sT#yzkvS78E_5*nlRve2Hekp z#~JV>16nhnJp(#2;6ny{%7Bp!SjvD^3|P&8Lku{7g6MyU{+H-k zIU{;jTY8IVHqjYGXAxb`SzAN&Zw6k>z-t(I9RqtXFv`HU7`T#wzcX+xZ=J_mm-5!- zyw#ew+VfUN-de_6t9k1e-fqa-7x4B)yxpF+d+_$z`~U5qc>53D{*ys*3`$~9JqA6; zp#L%GH3p4g&?E*;X3!o69c55H@7&Bgck|A@yfcV*hVssE-uZ)fHuBD9-c9D+i+T4_ z-fh9VZF#pH?@s02c~Rb7z`Lh-_aENFdw22PL%jDe@Ac)qcX;nT-ur>~e&M~}ct4)^ z8}NQ3-hZC=U*-MRd4B@$PviX=yr0ker+B}F5B|jm_wvDgd@zI$M)JWZJ~(^C9~|I= zLwtAxAKt--ck$tSeE1O`e!_=q`EU~-Zej2_3~s{U^BMd+gI{Iv>kR&k!Q&Y`fx-J2 zoX_B647rjaH!3r0Ye)x^l65^#L!n5`V~W`GITma zPcpQEp;ZifonfsQ_6EadGHemUzG2uIhSz3zJi}jPcq+rw7(RmGV;Mf4;l~+X%J6bN zdPvkqPw~++d^C!WzT%@8A06eRB0f6B$9MDbzxntPJ|4lxWBGVIAMfJh!+d;{Pj29o zJNV=-KKX!8KH-xQe6pQS_VdX>KD~}lZ{yQD_;e_re#WO~%bB0<;M21KhZ%7-BW_{D zt&C{Ph|Y}Y%7~?mSjC9dj7(JBYgHO zpFPiKU-8*gKAX;{T8xU;XVkfjiZH4hqk1rE38PjrY89iC7~PoBDU42KbZbUuGI|=L z=QDaCql*|_$>@Lh{60Q^jL)Co^LO}s7@vQ{=YR3}7Czs`7mfMiBEGnU=ojgH@g`qH z_+l1cEar=E`LY^cCh%nvU%tkd8GPBAFTdu?*?c*dFZ22G6knDwCWSFqFy<=8T+Ntk z7;^_>?qbZtjCqVPPcr5i#yroM7a5Z#YD_v~S}`V*F>f*^i!oyvvxYIJ8G8j|UuJA) z#tvZYK*kPY?0by;kg-D~D-+%h+{{-N@K2jNQ)I zBT>dyGPa6wH5u1{ag7+)gmD)z?qbGW!MLj#cOBzyW84djdyR3ajBC!gmW=DdxbBSW z$+$j@>&Liv8TTpUMlx;;C#<)q0o58r*jEl}=+(O1JW!y@}9b$ZS#wRhpG2>Gh z-<0teGX6To-@y2L7=It*A7uQ)jDL*rPcr@)#=pk+W{hva_?C=s#rQWE--hwS7(b2i z>llBUuNo8m>R!Hjgs&cB!tYG@lL_mYuz?AinXrusJD9MW3Hz9EkO@bakk5qUOgPDe 
zQ%opjLIo54Ar^?m5vxTko>&sG`otO#YfS7sV&@aPNL1`nVpkBmn%H&3ZX|XKvD=8< zN$ehC_Yr%L*u%sgBlaY*XNWyd>_uWP6ML0dDzWCoS`uqbtPQbt#M%?jVR z>qqQt_KUqu>|J6X5F0{lII&NNjU+aj*q6k{5t~44BC*NDrV*P-Y!0#c#1;|zme^8a z%ZaTZwu;yqV!shvOKcsnTwnum zb+T)cU5D&Mvg?t3F4>L9P9eJ~*%y+13E7vEeHGc)l6?c&H{kAH`)JpHtjz^ z_P@z~l!6FOmH}vR@}VjqG%?Talef_M2p9k=>E(E@XEnyBFDg$sR!V zTV%gO_WNWHCVLp!ACo``QYLH1Zt*JP4?GhPbGT>*|W)>NA^OpzajfOvX_zl zBiTQZy_)P_$^L`vzsSxZdn4If$lgx&PO|rqy`StuWFIBFfb0`w7m@u<|qhz09 zVl^h#U}9}1CNQxs6O);E9ux0n;v-CajET*e*oKL3GI0ioVID!Au&#q|cbNfJxsoX*rY55)?A2h_CP9 z>j(JyA--&1NiEnjcv>-~IvkjZtKoXq6&n0z0TA7k}0rW|6*5vHEY)TT_mfT{N|^Ol!on`_e!$GZ%*dCAz%$mfk$;?{AtaWFf`#H?YXVxiZl`y*jv(IPth0Ojpv!7=6v&?>z*3L*a}F^#g}Ik7_cG=_$=ny2`x0~CW$tk1e$3p(%>9A6E10{VxdqHU&b$=nUBbM} znD;F6USZy=%rXxSIv{vY-zO2C?8>7OY~yS{D4pf-)AK zm9rMcvG7V3-o(ONSomKSHe+FP7WQW0+bn#Ch2vQ`g@w~txQ>NeS-72rl`N{kqFOAv ziA8s@=pGiOu_%*8ZCNygMI%`>ibdbB=tmZL?Z?ECofAQ^YeET-v4&mEje7l8j z_wemLmYm0uOGPcYoF%PTlEsn^ESbxaB`jIWlG7|X!*_x29_72|`0fS1`<(A$d^eHr z_VC?NzRPFn^(?)erFXKl8%z7MG|JLtEM3jgU-&+r?;G%aBfft&%J;AE{i}TcG2ef| z_ha~e1K;o9`&}%%h-Fu^>{^z!W?6fdb!6EhmMv%5k1P)?k7s!z%O7X?3oL(;H=0>#Hu!|>cpxpteVQI zd8}H%&vE>m#LxBk`4xU{!Ot!Ec?Lf(eKLAFE$u^;A~RWAy@7 z7qhyG)n{1qZ`M4`nrB%vjy026GnF+bSX0KD3VwNzU!LTbr}-tuFVp#DCcm8Im$P^k zzuw2Mk45?Q34ZO*ukZ2e2mHE@U$^q>c7D5_-)`r(JNd06zxCp`KK!o)$ngTK1-*8u(+$X_e@>vy7mtz}&@>n>*9rL60~x}L1-&ARVc_Y>=WW_@6NJnIu# ze*^39VEtXJznAs*v;J|`Kgs$xSl@;9-B{n9^*vZWi1qKX{silbSbv)JWvs6hwf+n_ z)yS#gZ2J#63FOoz=Nxhxl9NnM6LKyf=VEd$Bj-wTt|8}oa&98$U*z0Q&Ryi(OV0h| zJVeeTVHlPax%zygPgYHM9AqtPG@quk<*i$KIHT# zXCOI)$a#;P56KxyP6;{X%H zx!00=1GzVodn>tjkb5_||3~fvY>mUy-#vgIYVv}8*w zwnQ`8(v~fq+0vCQ)7Y|@E#Iq54!VCzq8UCq|BTh+0mUH%h>T7JN{tDU+h@Vj&1Dto1K@i^G0^w z%+6ccc{@ArXXk_L?7_}K?0lD_c9pZMlHLDe z_XF%c`*GR*2)iF+_w(%j54#7mdo;VhVE0&dk7xH3c28qZb@tR}&$;Yr#GYjKT+E(J z*)xYd-?L{qdseXLC-(foo>}|r{%h-EXOxW9=y?xl*@9Y43-(v4z z_6}w5YWC)`cO!eZuy-4K_p$c?`)+05gY5e^`yOTA ztzh3z?61!L`s_cK=>A6RPiFtc?7x)#ud@FQ_P1ewJN9R>zdQSTvVRTxH?V&b`?s=x 
zJNx&u{~!mh=D_V7xRV3-aNz$q@F)i!=fGLKhy$lMP|AUF4p!%24N(X0dB!#9O@tC&_E6i;?Q^w{mh|U4pneCj>G40_*@P*)T%#rgray~~c;>gt;xt${qa^zW#JkOC=Ig-ke<{W9sk=7h(&yh|X z>B^BF9O=!GksMjdkzYAd$dPi6*5c?nM2}v`(Th2H4M(r%=uI5`7e{aB==~ghh@+2i z^l^?p#nJzAG>xO}IGV-L?i}sK(Y_oVz|prjI+&xwIQlV1M{sl$N9S>L3r7!qKPCS&@;@g(M*bx7r;tCL{8{8LCjSTWe<6P(`J2h#P5wUe50Za`{Cx6HkzY!F z1^NF_5GaVJpaBIJQ*bE-*HLgI1-DRe8_|M0DR_{AhbefBf+s0>hJu$VXih?4kS&ap8Z8_%&A$0l)X7RTmtYyrm> zb8HF6R&wkwj&0@GL5>~fSRu!XIab24a*kC+IbM_FbvT~L@p>FTm*Y)2eig@W;rRU= ze~{x(a{L*NKhN!jACvM@yZJfB16Zdf9K2E%PHi#3$II)Nm%Q^8oCpL0oFDLeMqJR@8 zI8nrj)0`-yuo{ImD6CCk0)=%cOs4Q+3U8qBCJOH&T6iyo_fz-~g^y79EQK#n_+JWN zq3|^dGbqfWum^>MD14W~;S_#C;YbQcQ}`u?6Dgcb;WP?oQaFdg-zY4k@C+xf;^fVo zypNMlaq<~aCtv2|tDH>bWOGioM9{vMB0EQ5TB3Q`C#1 zwx@wF7+K=I8K-%9Zv6hA=m(-glQLf)rM1@IW>S&13C2}r-pLsBTjwFsn0kyo>MVSP2$uPPEF_30#5zF zsXsXNC#N=ZYMZE2J2p6WBr~k$2 z+c|v~ryt_NfSygpyWzQ{zb{XlsrMnQE>Ll0%dnq2webrzk0< zq=J%vD6K_lJf%sL)~B=qrRP(6HKn&udOxKPQu-vN&rte2r7sdKeVNkcl(wX_HKlDR zZAWQWN(WLpgwoF`{gTp&luo8}8l^KSokQujlrE)oIi)KoT}A0yO1DwEpVC4~izuz6 z^bBRyD62tPZOYCORo0NQWXhUQb^&EqQg$n4_fz&XWzSOf3T3ZR){L?ilx0vBp{xUC zohj=^Sx?FaQZ|&bQIt)j>}$$qQ#OyXg_M0m*>{xvMA>S}evMN02W5XzwuQ0-lpUk2 zg0d>gYf@f^@|*HCdi6}JF^=t)H%D*96~kctnf7(vB2DyCC0lZr)Dd`rbrDwb2Rf{Nd$SWCq^DsrjV zM8z&D@~Jp2siXYTd3SaWdW6?RMnuW7FG4BYCu(Es?MY8e5$UX>T0U4qv}SgZlUTPsvf551*%@8 zDwV3{RJEik+M231RCS`ND^)$H>P=NYs@|n)I8~oh^)*#fsG3XF0;(2MwS=ngsrs3! 
zU#R+>sz0e(Pt{JUc2l*FssmIVqv{0z^y8lo`DY0K3@7@}$NcjJ|BT_xMVz^wGdFVP z7S7ztng8R={haB`nGZNKm@~sT^ATr0=ggOaD+RX-ZWr7oxL0t$;2}YpAXCs*5D|0` zL1T{yM*wN5YmOvMhNc-VXP3Q31Nm1W(#4S5EcsI8zJNh;cp@A z6v7@M>=(i*A(RTCLJ0o|)dwLRGTVPTP0LmD^&YS zsFovC+bC2!C{#NlRLd8t9T%z<3e|5Es^2M8e?+MMm{9#mq53mI_2-4^FACK=2-SNE z)q4xo`w7*fLiLY@>LY~eqlD^T2-U|3aaRd((VK<1`-Hd$gt%rxTniyCLx_7rh-)jv zMTEExLR@DduA30oQ;6#$#Pt{A1`2V5gt+&FxQRmCWFc;v5I0kZn2OJYJ4Nq_)e(ty-=f2sPT_blTfp|P_u?m^B$q*<3i0Rg__R@ zHJ=k|4iaj9EE;NlD%AW;s5x4w`G-(*t59>hP;;kHbGJ~dkx=VWq1NR>t*eAu*9f(m z3$-Fbt@c8#PC~6NLaix6t%X9Z#X_wmLan7jtrJ46DxuaHp>{Q)cAQZAZlU&LLhUDn z+D}J?+RqBL-xg|rB-H*ys6A4sJxZwkyHIbxM-`L9st6`@W&1_}v-goL3&!f+ws6Cq)wkT6Y1m?0$06cXkL z3G;=7MMA<#A>n5sVU3WmAu1$n5)!rw34aR-yM%PeH zo{;#VkQf~*Bz`0$ekvr65faAg~Uxl z;#MK?Zy|A)keDwd9upD^g~VbZu|!BL7m}(ANi~F|+Cox-kaV$-bg7VZnGj97Qb@W+ zNV;A~x?4#4KOyOUA?ax$=~*G^1tICbLeeWj(rZFecOmI*A?Y0<>3tz-u#hxNNcvbv z8Yd)uB_w4FNnZ;|Q-!1%Led%`X`PUiBP4AUlC}s*1wzts(U5dPNGcMNP76t8Lfx7| z-8w?u1fgzb#D^t-Xhe!O{jaPQ1=<3?sG!j=Y_g23Uyx= z>b@$}eM6|*MyT6PsM}ts+exU~RjB)xP_w%Sw_e-JfS3=!vq3+j0-RVNzSwh{p zLfutD-M@u;L8w<-sMkoSmn_t~P^fo_Q15b~-c>@q8-;qe2=#6g>fI^SYbVruSEx5$ zs5eEZH%+KFU#Pc8sQ0Zdmkxlk|qyHIbfP;Y}!Z?jNun^13uP;b9b?~qXMs8Fv! zsNYzq|AbPOla`2&|rknV3g3{3!%YSp}|)|gPB5uIYNVZLW6}ugKvZeON0hXg$BEY zhBbtSHwg`26AcYBgoY8JVOODHKcQh%X!wE9@FSt&NTJ~rq2V;4;e4UtLZRU|Lc=9O z!_`8=UxbF6g@)UNhC76YXUX>o4G#(pj|dI(g@zSE!+(TEL1+{wG^!;uiWeFs361JU zg+><$jV=-zT`DxXTxirzXw*|^)LUrOPiQniXtYXbv|ebGD>T|9G}YSHac7}%SE2C&q4AGG5$0$R(fQj0Z1P!P36Ff=}DFoMd<3W6oYV!=mfN&_mQ1!DDT z5e!h}sUo#CSKWWs{;{*x`nQt(XWfFe?_uqBtlf#VHCS7VwRKoqkF}p*?GZe=_8Y8i z#@Z8D+k&-8tZl{Gl)%$idkM#O#j$;GYz~f{iep#e*w=9EP8_=z$2Q{FCLH@6j{V^) z`3m_nyo!zO*w~3pu~}gAFl-)y&113oc5EJx&39q*RBWDw&9kw2E;cX1 z=BKdvb!>hUo9nT;fsxGzvH1`-AI0VtY))cxD>k=b^IzE9j?JAoJ`KmG8tgB*i{FgON-V=;Iq z1}9>0G6wI(;B*Ymz~D>_7GN;C27{Y1xF3U!7;M7eVGMqS!S6Ac#NcljyokXo82l51 z*D%0{6T!P`Hj0``G;bje7rehI@bW4IE->oL3y!!;N_j?pxX z_P}T+M*Cni3!?@`;~2FuiZMC@qvJ6;0i)9}`T#~B#OO?n7GQJ^Mi*hU7^5X4j4s7! 
z8Aen4wHU3!=thjbkI@E<9>wUl7)=@Y9Y%k^=#Lofz<4^wGcev0lHD+Q zBPM%bvKJ=%V$#B-jY*2h1ST0KB_;=BawsNqFgXE}d6=Az$(fiez~mfE7GiQfCKqAy zSxi=7@^wtUfyp;9xdD^!U~)4iKVW2X7ba^lS%=AbOg3Ti5GIdc@*7MZ!(Q?uzMjOn1k0CZ-2q8e^JbdMKucWBOK1kHPe9n7#wkcVaq~Yw5|D zei+k_V0s>=3o$(((~B_u1g4+F^i!Cw!gM{Rk6`*Dc4lJdAnd#!J4>+hS?pYiot4=6 zDt1<3=X&gX3p?M&&P~|46+8D}=U(jGhn>++vGa56Jb;~z*x7`g-(wf-#@Ia`yXRte zId<>F?mF!L7`yASy8*i!vHKu)H)Hp4?EVG2&tmtl*nI)JFJt!~*!>rF|BVxG!in8+ zVkS=PjiZTKII%xY9EcNjoS1_X=i>Y=__hN4z z_RhxMBJ5p=y-TroHTG6w?-uO+5PO@k_aye7!rm0$cI@qBWPcj=cftN{*q@I5-LXFt z`}<%&#(oF;J?sy#Uts?r>>q;t!?1rO_K(K?eC#j8{$<#|4*NG^e;xLJf&FJO1GD`w zYho5)*2b)d*`XuMj>2p%W+!3x9?VX`Y(8dZV0IQ}XJfVqvkNf07_&<-`wV7RVD<&f zR$z7wW-BrKDrT!NyB@P|VRjp4t1-J9vwJZ65oSNb>;cR+U^aRbv)^L21+z)awqo`a zX4^1(9=(=gR^mPE)G71gT**lf`dzO@M#=ehJ&x* z;65BYjDtxWY{S9daPT6I22T{yfOhxg!c6AmB4;S)IAg2PE1 zZpGnKID8(5|Hk1?ET&Q; zV{sxD@5bU3EKbE@9v1Vl_$U?&vA7tEOR!jq#WE~DgT<9td>)JCSggcibORRO#^NR{ zZpC6X7I$FrV=V5&;(jcCg2lsF{1S`JSUiEn7Az*Qcp8gmu-Jyh^H|Qp@<=QfV0j^y z7h`z|mP@f*hUEy$2vGcVl@EmiJ^)l#gMVKu_)%UG?#Y7JKFvAQ3tpJDX?R*zw|1*@(9ZE@iB)BcCE MfABy2|5*L`KlrY&WB>pF delta 41161 zcmZ@;2Y3|K_dPS286ep*eI|X=f!$3fJt3io5=!VT28akEp%)Q%pItzTPgq1+=pYJ6 z6A=(atTaUg6-2QDBBr3^EjiQMX8tSl2np9-sB)@(fz zW@Y0a4u2$kcKDp|x#9D|pAM(tzVNl->%!NEZwP-rd}H_vTmRx8W<1-hFI#t8_t~7b z3|qD>&sJzFwUyi2*xK7V*{W(S!+Gg9Hwo#kUw$QfNw#>HDw#K%>_JVDTZJX_7+iu%F+v~Q2ws&lYY)5U!Z6Dh{ zwVknjY5Ustz3rmyvd8wb?Kj(>wtwu5owql(TkLkbV%P0a_E>v@J=xyW?zCsvv+a5I zLVKyb+}_6C-rmVxW$$6{W$$MnXur!o)IQuk(mvWg);`fb#r}ZZV}IEGn0VKiPk_Ul$=VqE)nsc2N{##8@#-j2E4vOLU9rVv$%ZmWZWd zrPxvIB=!;eiv7eo@osUrI9{9}P827Jlf^0GRPj;qG4XNn3GqpBhB#C7i3`PN#nm2h zjks1^C$1Mah|h}~#h1lh;w$2A@l|n;xL4dKz9+sf9ug0WN5rGzG4TWOjQF{DR{TQz zLA)gXC|(x-7O#o_h}WeE$s$=Ln-nEQOEFTc)J$qFIVG1=AQehQQnA!t>L6809i>iE zXQ_)+CH0pENCTxol4r0~Bi$v{O7}^lrFv8i=^kI#nL)y zy|h8vA?=i2mUc;RN(ZI4q_?Hx(nrz>>0{}f^p*6r^o{hB^t1GfbX8_#Rt}RnS(GJN zmK8Z(cE|~GqMR;g$eD7MTq>8zE#-2#v)siaSIJ%F0rEh3kUUr(Dc>uPlJAqJ%J<9D zFO;8^SIcYUwenVZo4j4#A-^skl;4pL$w%em^2hS0@)`Ln`D^)m 
z`A7M(d{zES{#${p(HEKl;(<4>7;a3Jl&L@N-w3K(q9>@ z)F?xhVah$q2<1Lyv@%W^uS`~^C=V#p6{0+(Jf=LZ%v5G6bCr3DPg$rur!*)llr_qW z%2s8k@~ZN>a!`3+Iih@|oKiklzEr+bE-F7MSCzk%>ng7{R&A=Rnrf_?pr)u!HA8Ko z7ONE=wXNDotx|icz0?6}tvW=#TOFm=s}t3!sz-fPeN25^oukfG=c!MtR9&JjRhOyD z)wSvt^(A$yx=r1#?ofBC`_LORZe%pjB!ewN6^K z)>G@H4blc{HQHU;J=zFuq&7|)uT9X1_NexRHcOkMQEh>?NL!*cXsfh!+Vk2b?Imr8 zwo5yt9oCL$N3~k+!5 zt9nztnciG?>Mq@_r|TJdrrt_#t+&zJ>h1LQdI!By@2q##YxN=eP<@zQr{Ap)*YD9s z=%e&8`dEFuK1H9Z&(Y`V^Yo{6sz0O8*B9tMeWCuWzF1$buhiG-&+A+DZMtW>zC+)o zzp5Y9-_qY!WAu0QkMtAz$NEYAl>WJXR{u)>TEC$GXfOtAgc+Q{8-mfuXlyhw?1pIQ zhGE1SaYnL{Vx$@_Bikr4%8fQgmC@DcW^_0D7z2!f#t`EkV~jD@7-!sXOfw!Z9x)y@ z9y1mh&l!t7#u8(xvCLR*G#D$4b;f#Qld;)&#n^4UYV0xg8y_0SjgO2I#>d7<!^}peVydQQ>ZW0uW{eqYI!%}9Hq*@vGth13B;jQ-e z^!D=h_V)4i_4f1j{{T&JH-uiy0>j4%aqeC2RpJ%YbqTsq~3mZmT5;{jgB{(AOicDN^fxk)6qNd zW-DE&6cxkySNpA}qWcLZ%d%leMJl5*s&}z>^?!3oXGL%EI#yz3R$*0EV|CVGP45!# zQtvYFa&Loog?FWQ)eUwwIv|^k3&>^@0uxLhQ=ld113z6y_?0qMo@Llhq8ZnV>xksUQ zB_7-w*=$m7t@aAk2~fCh(j;fyKfSVX3f8SS~aOD}P7x_Q_G zVNpnJ)YLDvk&{uqg9!7pUnyaJ@eZc{Bs%Dzrn|Q$jOdunEZdqDZ)52Fi+|O_J%oMV zdlz935%#?sLc<@TW7W0=NfkSW&3fZ(kJ=B>1mTE)9{yPP3~$NG@W;cS2!E2WhY5Rx zut!&h&kUax{uE)45%vSZen_`1sqHb}ThkD}fUw6y;Ag{^drQ`YFA9Gyd~x`a@TK9) z2>TIXPZ0KF!k#4TDZ+lTCcGhhg+B%V+$Zd(By35bw+G#-<88twZ}BE^|JJU{9$AB6>h%1y#NMa*?p@(y` z*ofaK)56I{{7KVWG-D&K(X5F%tOay-i+I)&MmM!6U@ZdOKheorn$S!BKNgy~F^RF$ zmXoqsi%g%(OJFS;-J9oNEhasim&974>FG&XtR;?$lM`7>0v(v&oV6s=yC%C?OB$Us zxjAcTPLIBj%(&^<$tkQQlV%jSSxXKbT#&+A^5~!`%~?w!-BOUnT1x1#7u`%tdi}-b zOecYNtYrki^ zS<6H^vm}ePOrd*A(pk$idZ{FXwLD1UOP!Q#)#+WOxvb?8I(m8s*77(#TbjdIXhoSu zo0S=?E#Er=&AZfy+vU4V<_Pt64BS24Mr17?(CfRZn2%^}o9?XT zByD)51M?}p+|Es#w~b*fXQ<=RcC6)#+m<}bS9HsxooLTjo%C#_K+itP)Bf$OtmQnd zc&vi8{6PPybkkGqG}dyNUT#;}$nvv)V&~H>?V8f^$92~7EA_VT%3A)Q$J%#fEq_yQ z$7a;qDT0>mm6+?a+ulC(_q{pvhYoS9m8ahJZdz2C$XXlIZhPHyW@U5M8u4_$czX0v zk?ws`XRS8+Ph~6CD$$JBN*R@UJGNo11{J%pvd9bL2Oo-T2$wFR9wuNk$|2>Q<(aZCZ7H!quZJ>X!91B{#JDfFq?R@PcZ zw;UM2R8U9PZmhKpt$DL2)1EGy(}T5kq(5$t4+M7U7Tw6&#ouL}sno+w7k5i#t=;IB 
zgPoXaDt7P8T6@#M-TSfDestcv?yPknz4lBto!i|;JNK}$)*3pnM-6KoLcQ&Sd_VSx zWvz9A`E~R_B#omIYrThN(K6P0FC9y}v)0jc%Yj_#=$Xn|$I^<&g0TC0+F9!aI&WS9 z-TZD8Gx_O$*;MSMu-5zOwP&hW>vTG}dobhmy;4}KmmckPH*0;EiVKFY*2k#W-A&)= z9nV^yq|4@H)0jREI{8o@tvM9Q%%NiUY`V8k6ty4Dqqe>(Yn@M}!4b*Y8 zH}e9W(Z3^W-ArFx)RwhwrQye%@(yotgLNkfTY6(7vc5tavg49&?Sa-k*1g`6<;z2c z{nlT-C7Z-tI{VlVkM(EkFV?Fhtbv5BBw?#b*jf^{o`gM5!d@U@n@HFe61J6uZ6{$n zN!TtDwwr|QAz}MS*nSfB1_^tUguO+=-XUS{k+4G~><9@vM#4TMVIPsOk4e}m680$x zJ59nqCt+WZuyZ8rYZCS?3HzReT_9mUkgy*~YSk0P) z;WiU)8{u9i+^d9pjc^A@Irlc<-Y48q!W}2vNy2?bxU+=&ig4c&?gHU{B-~H_PQDAZ z7-Dl{m^X@qt?=T;xY)Aj<`3Izxk3A?kiEbg>3dLPN@?YBMJW!t*M!_HY5nmUrB%?r zE@W>;C znoA%0G^X*~pzk$0_meDI^jT8l`9Z_$0jkxf6U{|I!+{Wbsb6U;AJLd18hJ)G8-fuB zLlLX}5pDbt8MI2W(x#tFs%>2`?Cnt4M!Nr8678lKPoI*NO+oj&A@^3=4+}cJ z3^^LnE}yqn!h`m&LUvnVx*q>rQKX>nn~+Znh;W{j6f@}iF64`$UC*{PiFVW$8DLxBa} zNSc+9?c1y|3LW{S)x5D9{1ggl=~t2;D5%|EYB!gKUqS)x=zh5+-S(9jcTcFP8>_^x zp|CEr+quCv7l+?N_MUYAxz0Bihd)F10sdZVHgr_|#9`_GBEGRw{2dA$;uqK@*j3xU ziZySn7ypDJM))II22#+zj#Yw-28U3<7~1yh=WnbP92>Gv_V=T&B!Ow-E01KFsP^EJ z!EvFG2LoL_^BYAWf#KnVknd5t<(sa`6G409kbM>%^H~;c^R1XM$Nr4BWR-oceV+Yk zJ0)B=;UWlUA)IxUeZGAG-1dcpvk}f7cnc)aO;_%3wBw#c zwSA3!E#VZxsf5!Ar?0ZFw{L*kzL9VS;miO&l5V;(jUM?fA!CPq_Z@Ms+V{Y1-$%G; z!o>t|u{8dBr{_)kyLZ(3p8b8e?S}~$PdG;alaN%g;~w9D=SB`0I4w6fuZ7Ezm6?-2 zz2CIl{OnAZBXjz#rvv+O`>8t;e`5bMm_$+&!X@9xqT~74j4$oq`a$RHU)jI5e?z!b z!le) zQJ7sYz3;UAg8bXYLu@Iwxg%#=v0bo>DhSt#aQ=6bk(Bu%CZmhkN=(KGakRH&l{iwoR~#kYN4SoJ>qNNDgzK_O ztQW@wF;#@?dIPiR%EF9kqSudkK%6c zBQwRP#Ha5lkc!U)fqe7g92Q5`Vj6Xa6q#0k`-U;ieJpfdFPYm49rS zDKV08rA=M8`eo%B;mhA3%wYDm&018ymja1RsikpSjV`qa-U8Szr` z9otEfQsI`G67F%rJrTe>Nx%3xH6vZhxnn!IQVY1He8SBn+^hiRDVq07vy4)ymA7P- zR3^2Q%B2dz%^}=e!p$Sx)2pP`QX9CXc7&sZdnSOH9~jE7{Tg5M^mwHImQcuG9 z2)B@M&k}CYDyg^B2X3h!;hrPh;s9_-0QmCd)J$opbk7}Cj*vzM=lC+hEe`-A>4vNE z8DphMcf?GVrob)TPq-C?TN%KtqQ75F&G1T(-VyVd^f=tolZ0DCxU~VyI_mo&Ia8V= z-En`E=1U9UmKG9j1L2+zfHuXxj6K+QU zxRWZsH_dogI^xItucDETO2^=qJ|x^O!o3o}?4~1scV?WD&fKw`&!w|)OJ5Rh58?I( zF#G6%->tjI-=x3qsO_5c58N^c_a@;E1~6~Y 
z$$vD<5aftEwIf?(E8Mc3aPJWA-2mo2dgPC08JZk*r*`CMIRLXGjimDJ8CPGi{O?^2=@`;P6RL?(~tj5^|X@P z-x1S6u7q3eM7UFg`y_z*G^t|8kmH9jv@kHQTIA$U?>ntU_U#L$++FT{N9}#&zHrO^ z33r-sXM$Vp=hXXGTE<;+-5rJ8Ef0rV9znP-2=`?`$T@oAujU!`@`O8LCd!k7Me#M^ zz6oF=X~y3Pp6T*Kcf>p_KLWS>7~#Gn-1h;@dH?L6?-@2m8aOShurS}{$STbBKk#K2 zWES4GJ;<}z9pTyIIFBZ{D4&-v$QR`U^QSYh(P4z={CWo^RqPlz zA}_u1Qy*X??v<~|Klv&cCV~H1{sr~^%Xpd}6EDlZ$$$9U_?_@VgZwAq8~NKv`N!YJ zb;38k)rP{z7nLxi5P{C%mOWu@K%mzk*3hRYY%1gCY^$ z79vpOi;Cta5Ca5u?;w9abd#c$SU*Dy;pGM;j_^v5A<@r}M0hpCkRo4H()}?OgeL~?{B|*o!>ns=uQi|ulU^)eX|87%2(8gNx$*ti+2Xy4>w}c8IRA= zh>7yO?{^1Zd+`}Tx7L_RZybF2#b*UggBvsH%xvFZe#F|wOu9ss`TlWvhVU&Klm&#( z^N))|S>&w=y#-d5D9ijoO9@}lpe!eRp+6{IS>+$R)r2ql2cgf9^rg=W|><-<_V94CD1oAe+1=}!_5-{!ykpD7!afdT%u z0si=Z^MB#zw}kk=Qoaev|CaFWZ}Ok_^Isr*hyT+5sBBbf1N4=IFZB>U;otnf`1$z| z|8I)F=5LJbpM>vplmD8Z{~yA4{tv&(DjSu%1N>bA^8FtMZjPGT$WL#*MX!c;RV}yl zt5(8yy}_^gR|Qofe7FDdYsyCD-T;610DsDdXre|co0KhTv>M|}urR8pgz!B?_5(Cg z;TEbViYJ|W{P!rXpK~>dVCQhgwGODgk?S$HuYNDEq`rcL5{=1OG zG_~2S9#ESTzSm8KZok5G!uS5KLe`^fRL1ckx|7yRF z+V58T|B*IGt?@$#6aKCS^)ABK`b*cN4)Y7EBm9v63LBwpRGth78yXOn_3t?_+OI1- zq-(4?{#Lu{1j5(d%zLt*ehT65{x7{h@7dl|e+dQ?&k3;+^JNBG9u zn8fHO)mh$>73vIiCgJZT{HPV`Q|fHO-$(e-zRfmfNXmTmS#QZob%E+r7ZQF9;l~nw z+)8zk`kcC$@Z$+Ti||kR^6gB0`xV~eRq9H0mAYD8L-+}VpGJ5O;U5l(Sg*d|C)=Pt zuWls#M8Z!Z{N$DDi|QtIGvTKYek$Sb_x)gJmZt1d_x!KWz3M*oHNrnY`00dy(6?D+ z92E!EcmG%Dd+Ph@A;NnJPYC}|Qp?K-{2K~K3Jb^0BKUyt)jK7L8OfG^q<-ctS)*=H zKUPnw8`V$LPXihsA^f9+pG^422>)CXTSv`ngqSUi~SnX6t z?F2_wR#A42qgxm8e7_N+C)AC*yLRY}q2I6m;w^4~-5VC_(%&_BOV+A?sDG+|DMjiv z^&j;*>BY|_{L_SghVYq$UqJYUgkQ8)V>DI^(>RUS1g#O_7ZbjL@M{Raf$%R7ehcBZ z`FR7M&9vaVe16`o*Rz`K<_8nGaqwk6Pkj$)Ob@Fb+*Rj$Q6m=mUnlz->5S1hI%xL! 
z#_CLGt0Q>#tUu`1*Bz;emJ+l-=cj5(E00?f{10vxt-0p*7Oc>mnv3vD2)}fNmab(G zei`AH`?3tCS#+LO=nu%(3JAY~@GDnnMOrc8R}p@7Frbyz)*sN?AFwtM&`xVl_;rL| z?{k<;Vssa++kXPOYdr}6JmEL`$_yqZx{uc1AJEs&@M3^rfHsiun+U%-z~C98)%{1q z-P&-%zeM=0N#&2s4a~Qs^2f&nE^dyQMH{8n{|7ck8%y}@gx`@={?KbTU^gy;Rli@G z2%DiC2o>0X68=nhBRa+Qz?h82=-N+ z$hq>ymjPMT)&!kzgq$00d=nB??S-K8&5(0T;OmpNE$DbFz(+Cl9txMA1c zK(h9}|LDUXBfS53>^hD962qfyByQ9 zvI{a@j{Mx5>C{7J%}BK#+W|8yNvk*s~EeXpI@E+AR^!8^z=J{V1 zj~G9pcJzd{!$wS~A2+^p?bP~-6KX`UvQ69Sd+H}w)lM2QymmtUxB-=I+V-p+(z$j> z-KZMT?c1ElMEWvgnGv2rm2KJv(G}zBYWv+iqHff%^cx?`g5COu_LsM$A-peP_cdsL z6aLK2`FY)2yh4Z05dL$*e{nNgi_Yndyag-5hlSS>_I1L(u_Ao9-h{9R2!A%I{C>x6 zrQxyYR$csm;c_7S%cS!9+rxET*WvTgm^+-KRkJe-KSUryLUlIOm!hb{fZwdb$ z;lC&RdBR^){NnqDm_~- z@|LXCbM#!jg`TJ9>jiot;eR0fCBpwm_{)U9LinEu|MOZT!>yMh8Sm-kaC`kfPA2>> zUeTLJ_^X5yYs4hq7tI;n+NXZpz2nE!4z24tdel_kndVHg>K~xCQ^wSe9#%K3azfpG z<7>o(K(Mde$yE7RCzIm2(am}%Z}IY**Hzx)&^bcyuJ`k6?V(reJ@sCCZ@rJ+m+-$4 z{&&LvLHIuj{}vwc?bEm`Um76!e1u>Lj<-)OsZ_twpHzf zq4%^KF?v{4tuMpH{OQq0q6t%_-#f6Xx<~Ek;dQO+Cyt&#gy5s2exE+N|Ba(Yb=`y& zdOhm@buTg>+<=cGLRerI)FIP6T5`_$>V;|Icg%vs;f5!+K1Z+I{#<@|SOs>x=Z= z{+pXTy1rL`-CMF!->1K(?+5aU!GydZ{T9n$?JQ|JqVFs&43nx?w(=$&~sY%49VE zi8QcYSoBZz)82vx{WBt@Ht1)Fkmma*)-r|); zopHA@oCuwWP(_69H}Mu@q;cQvJA*OWs3$^aB6RVwEtr(P#sp*Xe}p)V2o5ura31`uK3I=BsrWTYAk44<*kcs9^wgS?`55D^|C!ZSpePlP2k zqUhhF+tdx2IDB|rVEe5R^;_F<7hg^sGcL9NsKB1yb6nlOrqe28jejz%Cc@wbV=WPC ze3ufLthUDU#tVM?Mk3tRV7y3#+MZo%M~n`2=u5^9Z^24qtFg`4PJ|&u7)pd;D~+8< zHFgo9&acnw8Nbe z2}GFq0h$;;7?+HM{*}Y(Mo$=V_lUZ2L>Nzm;=n3xTrqy~EpssOVJnQEeM=lna_c~N zR3N7CCi;)S?WBKKcHkW`h8fpvt-K(@6cV;Bz-Yo`d{qgI!I>=T{a3orlfdwt$(g+G zi3G;QnF8wlSI)q1bpl5lvx#Z*7X0sDx0rTQgxi#fFpUU-FS^2X|KZU$VDa4F?(mC| z!GWi=nsIfbht-X1+Kk4C&7@#KePc#>YZ}aGA`o9%D&r_HZErn|lWn;Rzzl@c*6V^Q1D}B{SR1^_Dc4IYfA}flUbM3~utRPP|4na0F=%yP5UttvNL6XB^FRc^NP7Oybdn;nQSn+Wr6 z&M1r7$*j7)1kJ8yHzLd-!rVcuV9uRC$DDL|8+FbzXcFDj<`Z z&-e=nsYF;!gylD@=jOy%U%zsX=>Dn_NwYmbyMqxRg4>Y&xlENk+_u9I&1Mtfv6$Wx!?twq(GT3fM}0ZS$B6&q%;F 
z3b2g^Y-0f1c)&IZuuTPQ4*)g~V0#F#Jqp;K0Bp+u+pB=>1YkP{*!~3UZou9Wu-5?g z`vLnifc<&Ez7eqR0_?j1`vJgy5U{@u*xv>0?*sOafadm(0sAL_{WM@d3y4vG*cuRP z0P#scTmXm*0dc)AHlImi-t|@FGm7UUKs*VEp910;K)e8mzX9TPKwNXr3fJs`aVNV@@PKOh|hr1t^o6d-*HNT&hmb3pnIkj?{g zDd3Si0&-_St^(w4fZPv|2LSR)K;8_7)FFU63{dX| z)OtW23#i)wbw8lK0jP%o^(dfz0I0_S^#q`v1k`hY`Zb__3#i`%>IFdk0nlOrtplJ< z2DCMRb^_3T0<`OZ&UgTw19SnEpeg=w+tAOzvVEh4?3}7|}OgCV*0L<=y*&i?m0%i?h z)&k~ez#IdZ(|t{fm}t)ez+4EJivV*mU@isB<$$>YFy98uuYkx1ATkSx><&au0wO&? zBmp890FeuU$VEWpVjyxU5V;(PTmeL`0wUJ{k?VlS4M5~pAaXkpc?gI+0YshzB7Xur zk$(Y^*MO)RBLa1rW6gi24$Ux&TD|07Rz) z(fL4hArL(Uh$cYv!$9;>AbKqjy&j0(2t>aKMDGNmcLC8K0?{9#E22LJqR#@+UjxyX zfEXJPqX983fS67|OkW_TKM*qjh#3gPi~wTp1!5)uF_VCpDL~BqK+FR`%%i@s#f;+F z1H^m>#2P?sTOhUuh#dvQjs{}K0~bLX1t4}4 z5W5YC-2ueD48-mMV)p^D`+?XaK;)k9cOdppAoe;C#{izVFd(im5El-_S%5eV zh>HW_96($W5SIePl#P0&)zXKd5;K&6W6@a5P;AjUpIslGNfTIiGs0AD&0mmr7F&1!)2OJXt$3uYQ z5y0^z;Ft+Go&p?C1CD0^#{$5y3UKTM9IpY6LxAH5;5ZI=947$BNx*R$aC{Cpz5pEO zfdm0aumcGake~tyI*^bCBs2pOoIrvbNXP&ZCIbmxAmJ$>VGfY607zH}BrF0F76S<@ zfrQmS!df6>&NcadyxB?{nhG--LNMwOT9!P8iB&tB74kRY|j+HXeOo{J@QYP9n z8c3W0ByIr`KL!$i1CnBZq!b`24M-{gl8S(&5+JDzNGb=CS^-IIfTVUnQU@TZJCIZj zB-H~+Q-GxVfusf?X#f2_Cj%Xm-9T~%kX#QWPXUte z2a=xyl2-xAYk=haK=Qjl^7}yYAt3n^Ao(*O`81Gx21vdDB>w;;|L!|h#uy$ekYWc? 
zBp^irQZyjN1X4NxDdT~Z6+p@{Ae8}9V}aD>K&lH!%>YuffYe+dH4jMb2&7g6sl9;I z{y^$LAayX1dM}WAACNi@NSy$rP6AS=0jbl0R1e@ueF{ij4y3LJQg;BUF9WH2fYg0J z>V6>gAdvbtkoqo=`XP||Bar$#koqT(dJRau4y1`dnhd0=K$;GunLyfbAZ-kgHXTUw z0BMf_X-@!YGk~;NKpF+o<|7AbJ|OK`AguvNI|!s52GY(0X+HyLSAn$OfwVt?rj3B6 zO@OAlFS4A8uFL?MW&urefTk^grujhALZE3e(DW{#=>tI1l|a*XfTlkJ&Fnxk6KEC% zG>ZnBB?HY;f#xsxrk67xd!hhmEZ}qi&LqH@3OJhqP8Z31k;Jg=bjs~1#0p|q3IT`Rc z?+2XI0jC#mJ`6Y?1DsC+&RKwS4&ZzmaLxyu3jybIfO9F}Yyh0A0Owl3xdCv#05~@T z&aHrR2jJWVI9~;v`vB+bfb&hj`8MEu4{#m^oW}s?alrX8JaB#jI8Ou4vw-s);QR(~ zeh)Y=0?r=+=TCt1D&YJbaQ+22{{dVK;Nk#RBfu37xU7In1Y8Q>(g9Z_;EDlU@qjB4 zaHRmQrhv-{xY7Yv7U0SST=^crRRp+70arQTY7MyB0j^5G)fsSg1zbGSNjR#zl0M}H&^#I`V^FIW*9tB)a0Ir#UYc}AT2e_W` z`+b0G5#U+^xRwL1m4Isv;93v3HUh3qfa@i|wHr+|zYAR`6HNCPse zfsBDb#$X_W0vU^ejHN)vejwvrAme>_knszUaSh104rC?)nJyqR9mpI6WZn&A-UDRL z0Wuc?nTvqTZ9wK8Aafs(c?QV*7RdY_$Z8B^NkEnYWMu(aML<>wkTo308UtjF1F{wX zS<8T|1|aJIkafrdWE}yreh0D{AUh1mb^zJUfNUp_-2uq%4rEsY*%N^5=|Hvz$X)3=E(7Go1G!Cs+~z=TC6L<#$n6Q_jstS10lCwG+@(P7 zS|E2lkoyLZ`#z9+7|8t<$h{70o(1xr1M;>4 zd9MO_dx5-$zGu398hu*D7gf9N>!jV7ATDeN-KcU zjzDQ=pmZis`V3II04V(kC_Mv|o(0OnfieXs(}1#kpsXAyYXy`I1Iq3L%Ibl#89*5Y z%H{)Qn}D*HfwEVCvM+$L^FY}}prsXPsl$VoCeX4RXjuug>;$wN4YZsLw44gGTnw~a z4YXVfw0s+Ac?@XzA<*(S!2dp}oCV6;0p(qR^6o(SVxW9AP`(x@{|+d>0+jy@RI~&t zIsg?Nfr=SG1qCYR0~O~yK*dF%;u6rR2x!$BXw?>IMS)g}fmTa_R^J1yegazk0<>-p zw9Wxqw*XpC09sE6T6=)j`+?T)0%h(H?|Xww^LQv3~hb=&dSAY($0+lRK839yUfy!#2av)GS7^vI`RBi_UE&%0MJzfy2b%r9YELN zK-V!q*Kt7CSAni?0$tw%x`{xyNT6FZ(5(jOb`Q{PB+zXa(Cu}g+X0|E4|KNy-9?~# zHPC$^(0wq_{Y9YrPN4fPpofP6dV~W#EI^O$K#%@FkAXmsmw+C-fgXE+>M)?%0#w_8 z>MEeR4^Z6?sGbc}`+(|af$A@S>hnPLMWAOX(6b%Tvjfo63-o*v=s6SU`61BrGoa@g zpjQ;oD+%!QN&$L}1bU4JdQAj+y$SR>4D>n*^mYTi^MKw3K<{}#??piG#X#@Rf!^N% zz0U)Ea)3T1K%X+84*~kj0Q$@V`WylJoCNxO0`!dp`X&N>lYzcNfxe@FzN3M@8{k3T ztw7)HK;NrC-+zF90Q!{x{n`Tk+5`O_1NzMY`ppCSeF*gX4Cr?T=-(9Rp9S>K0s4D@ z{!ak?X8`@*1Nt8a`kw#>$iRSjU_b&eAQ2dl1PsUo24n*RN`V0_J-~qfqtL#hv!IhU zfdBr~kOC-?0CrtnyK7(Ds;jH3t1fn32!wzEc|-5@MHEp4kt)*slF&PXC`Cj>5EYPw 
zj`U6vAPJ-gzvIo^OgVEg=Xqw%Gf&X$Nt*qOX8)$yD>QqZW^d7~In7$qER|-XX|{l7 zD{1yE&DPLtEzN$U*-tdvO0yiA?V{OUn(e1qF3s|3R!Fn+G%KcA1Y=2z4FTAJTT^P6aXE6wkvc~hFdMe}AfZ$a}`H1A6D9yISo^S(46K=X|>&!+hS znjfP1Nt)-jA(KU$+wgI0LibA+??bVB)1~D4ar?d?nd$;k_VIg zKFJ@D{2|H1NFGk|=Oj-i`3sUWNX{a8I>|FhUPSVjByS-3XOcIO{2R$RB<~QFyqo0T zNj^aGVUmxLe1_z6w1}a_CA4Tji`!{&KP~=Fi&trpOpBJZ_<$B8X)%fx3uy5*Etb<_ z8!h(H;s7nLq~(pYyor|Y(6TEnyVG(>l$NV#xtf-{X?d8IM`?MCmW8xDPs+)T=Cq})l$-K5+{%G;#8OUfstd`ij~QpS-oft2VZ zQl^nIgOs_XEFfhuDXU3YL&}e&Y#?PLDVs_8jg%ZxekbJ=DY>MaBc+g(^Q4rKQbEc^ zTGgi2^|ZQ;R(H_qE?V77tAEkz-?Vy;XsZ`!)s$9W&}tT~zM|FFv|3H8HMCkws~>6g zE3LNBDw|f@X;n@tQm-KO22%e@>Mf++PU=IXK0@lVr2dE07fAgtsV|ZG3aPJ=no8>1 zqEg$D+Md*OQah5`nbba{_9OLEQpb=wp487tolNQ%q-K&jozz*R&LwpLsb7=2oYbwP z?jiMeQj1CbgVur8SJL`MTHi$L|I+$RTK@?gt$WdW5UmH(dJ3&)(0UfFkJI`rtqW*# zH*FrK&7-vGPn#jM8A_W>+RUZReA+Ca%}UyQOPkfS`HnWf(q=PlcGKo}+8m(GVcHy{ z%}Lrsi)d3!n=;x|(&i7|uEyJU^L8?C59jTbyq!y0ZPF4+Ye-sS(yk!wYSOMF?S9f8 zBkc*&o+j;C(w-;nEz+8imO@%<($Ywakk)~;&P3A&k~WI8iKNXWZ8m8ONLx(WQqq=_ zwvx2fq-`MW2x)nwoguA&wDYvBN!u9O#?m%{w)JV-ini@(+l#h+Xgi3ugK0a2wja`V zByC5FYCDd$6KMMdZP(HEAZ<_6wurVDXj@9#3fjfcE|zvlv};1UD`|HP?XIWYjkLR& zcK6fn@3ecEc8}8TG1~o;c2Ci+4ebWeZYJ$E(XNPh8bx{MTHd*ycmBdVxAV@Ov|md5 zuW7%6_TSQe4efuR{d(GOr2Vh7-$MIr+Ha@*F52&<{eIdXqWw|YpP>C|+UL{$9PNu} zUrhTl+E>#44HTNOK}BiAGY1yiKGXkqD8FM7j{^PNWx+ zzC;EP8ARkgA|DX>h{$jvBZ-VAGLFauB9n+rC6Ym88j+bq<`9`rWD$`iM3xa*PGl94 z??gq`68VwHPegtpvYE(MBHM`UB(jIdJ|YK+93gU?$SERuM9vZ^ByxdBDUk{y7fBDK z*C4$%>9M3IkY1nk2BbG6y)o%mkbZTP^y^5!f%Kb5zm@bmNWY8pdr5zQ^oK})l=R0* zf0Fd4Nq?5~=SlxB=`WN18tHG6-i-7Xq_-ly4e4!3Z%=v$(mRvhjr5+R_aVJM>CrzM z7)<&Q(my2qW70n%eH7_qN&k%WiKI^<{r^bMB7Fwwvq_&v`a;sbB>gMWzaf1k>8nZq zp7eF3Zy^0=(l?R*8|gWu?;w3Q>Aw?AKS26n(vOjTlJs2C&yZd~`gzhz=ztE@=F^94n$qEAI;7K~2OWCRVG12)&|wxG*3)4N9k$XTpAN-zD5YZ}9j~P0)uK8+ zL&v6ce3_2j=-8i*1L-)Kj??Klla5>IxQC9v)9GqD-9)Eb==3_BTGFW%oyODY3p!=c zX&s$5(P;~vO8E0Dy>m4>-$>^>>3kQRQ|SCol+F=4kEZh^I!~eV*K}S(=e2Y`MCa3V z&ZA2lT@vZikS_Pqn?B zu6NKi`X9Q!O4rxv+KaA(=sK9LpVKvyuG8qchOQgw`U_o)=vqnFi*&n@Zg#}Il9 
zrN=UQd`FM(>2X92)`~?xojj^lCw`6nec+uMzYb8Ku`EdM&5dN_uUl z*M52(q*o=qYtp+my>F%Wz4X4H-Y?U;8NHL~J&@i*>HQJC7t?zMy;sru0KHGr`!s!G z>C=Edm(k}*`aDmc7wOZLKGA;k89<*Y^qE1QS@ijlKAY+D8+`+PW9b`D-@EDiFnu4T zZ%6v}qHiDienH<^^qoWBP4wMC-(Blu70gD+?Bl?;wBxI2S;GWdH2|IFa%ue?`{_u_ajf%l&0z1Mi}4c_~l_cD2J z8t)z8y_39mn)mPE{RepeLEe9t_lNQRaNhr!_j7oEJ3}sG$W;uvh9SueNn=PmhRk5d zLWV46ND-pd(hQ=`T5{5Ql=nD*eouO|s zbSgtl1SRNn$g^zFN<2(7d zDV|W?ED;V)6BU&+{ zH6!LQVhJO@Vnh|6)Z&vEK55D)Z}CYpJ{iR)6ZvE^pXBpNF`tw&@-asKn~~2kGCG!# zQyBRLBTq82fRROf`T(Ck&Zqz6(@}gnkxwV{={`O^&Zj3Cbt|LpWz_wQ`hZa*88wPg zyBT$uQAZhlGo$Zj^u3H8#^^DO9>?gtj6O4vK zjixiFzyA$O<~*&#?4}UO~%({ ze0|2hBWiqC#&>7@*Nk7o__cf%$7hLr){xKM;PWJ+hIbYn^nru1e?Kc>9Tlnl5cxSWiu$hek_zmV})GHxN`b~64(#yw=*PsW2}JVM4}Wc-tif06NTGX6ux zi)6e+#;at!LB{{cNG2nNjMikNk?{^0(R4C8kOUvjL*rKOvV>vWRfwRj9FyNC1U{@i^*6@#@A%5Amdvy){yZ78SBZ|NHpVD zGPaPBO~!UIc9F4{jQwOBBI76-C&)NWMm`zm$S5MCn2a(qD#`eR%xYxTBr}H0I5O*! zc?p?`WG0c>gv=|+yoSu{$-I%wn?+^bM&_Ml-c9CxWd5DZhspd0nNN`U6q(PE`5c)q zklB>XSIB&w%(uvFPG(CoQ^|ar%ywi($m~dF7c#q(*^A7+WDX#65Sj0h`2m?9Madja z=14L}lR1ve31m(pb1In`WKJV2qsSUV)?BidlC_Mi-^kic)?TIsro}QXo@uu*?H;Dx z$F!H2_CKaIXIgKjz00)sm^PPbOGQmv#t4+|A4?X4PU=46|-! z*1gQSpII%L)s9*1nKhJIpE7GSvlcSz8)ijUFl#Tfjxy^wv#OX~i`g;E{tL5jXZD@U zeuLR5%uZ$YFlLWo_BduQX7&nZuVVHQX6G^c40Gx*=Thb*GUsmQJj|R&nbV9pZJ3kB zoFPQ#e8QYhnX`a7Uo&SpbM`Rj2y>1x=MUz_FgKRDcQf~4=03{YmdtI(-1f{J#@sQ? 
z9mm}7nEMlRe`fAM=AL41F7ui&?|SClz`SRe*OYlLi<;M+c>|aiW!?nlWiT&`dA~Am zJM(rj?=165nODyIOPJq;`ByOie&#>M{3n?I4)eP*zdQ3sFn>JrCoq3G^Vc$e9rI5z zzkvBgEJ$R*l~ER4&4LG6@J|*z#ex(Tyu*SB3r4bF0t+UxU>OU(W5M?UtP&pH}cg@eDy3}z06my@>PGn z8p2mY`DzYdE#a%L_-Yql9pbAaEUUq?1eVo{vg{s~J;Jhouxv2PK4#enmhEEMA(kEC z>%Z{z?R!Ya+a@Tc@-;au_A^QPqN~9R=miH(X5!niYcr(#)>nnILFF6 zSor`eA7o`8Rt{$6`>gz)l|QrcS60QcssXDmW7Qk1N?}zh(Nz;!mBp&*tlGz_de>bv0JUu{wd(FS7a#R=>sSv8MvNG&+1}U zm-5{|`0g3LdzSA;^4$bc-%aGZlYCdecSWqZlQn;5%|oo|#+v@D8OWN&tXaXDRjeuD zdwgGw@BhX3FY z4}l+I`5~SkUgd{merU-LtN7tZe%Qdex~xlLU1Qd@VqJUIrL%4+>%L{(cdWa>x{LgX zA7A3f|M6pUe$3*>dHlG5A4^#uSRbv<`sZ2y8tdO+{Up{;WBm-)Uts-3HekaOYw+nH=Fvh>036f zXVXt?j$?Bon;WwEH8!_ka|)ZkVDl_C&tda%HlJm40bA~3%R_8=ge|?<@-AE6W6N5$ z{KA$^{8o?O{?vbdYr=1B_^kuKb>g>`{I-tY*0Z%XTkEs+QntRx);HMt7F+wW^*y!@ zVe5x%{g|y|**c!BU$XT(wtmmnwQT)?t-rA~o9su)ew^$l$$pyb=(A)$PxgPwewpmo z$bOUTW@NV@yA|1O$Zkt^d$K!_-I?reWcMVy583_yEJXHTvWJlUA=w|3{R!El$R11f zXJk(#dkWeAM|Kw3Gl*u-CVL*)3(5YH?61iFhU}GOuO|C@ve%Klf$X2j-bD6qWap5* zgY4bpJVnkkI#oK$k&CZ`=a5pp_;%IQK*cXE1>)0dn9 z(d3LHX978s$eBt`207EnnMuwZa^{n>h@2(lEF)(*IjhL| zj-0jR{7BAEC2U*8w$*IEgzZ$c`)6aWgw^Wyc-t_!~POWXHqo=*Esg z>=?|BA?z5+j!)S!njJs0V<*uayV>zOJNC2V1UpW#^LBPV#Lh?9`4~H&VCQq}e4d>j zuyYJM$FXw)J14R;i=ETixtE;Kr5DQeer zcFknhpTU*vy2$Rp?&|E0XLnt8zs&BI>~6*GHtbGg_X2jWWcRo1Uc>IS?1^Ph5_=l6 z=L+^*#h$;i=VtbFWKTc#3}DY7_6%mv$5Hl-V9z%89AeKA_8e!=N%j=5r-;4xu=jEH z{*%4`V(&BTZOY!4+4~WD$Fp|=dnd7X3VUa;cNTlku(yJ}Rs4?MtMPjrzbEkfQ~drC zzhCkSzrW7!Z}NL9esBHRwOzth(cdfB7sI|-_9d{d9{U=xuL=8FvM-%|9og4~ecjmC zpM3+__Z9orvTq&xHn49a`?A@$jr~`${}%S&#{N6me;4~7V*exTkFtL_`#)j-C{g>z zuzxc9r?UUgFC6b6_L~ zMsZ*w2PShMhXV&WaF_$fIB%+$gQqx{%fa&;Eap&C4kdG_C5KWu)P_SHIMj(ll^m|a;W!T0 z<#4n(1a`^+J>VYINFJ$(>eMjN0)N+YmP4G=vt1h<5(icuI1SE9J`TYH*xGPj@`qt9vmCY zvG+MPlw%)>IyRbPV>x~g#~asnrwB{}oV=8i*KqRRoP3FsEjZbdlkGU!gOj~DIf#?*aqDzRoLWQl)DN6mh( z`khn9IaS1|3!J*h>A>k4oUYC3SWY+KbVE)z=JXYuzM9iFar$mfKgQ`NIQ<-_U*L38 zPQSwG*E!vi)2W<(o73$$9pUr`qE64}^h!=2;dCCSFL1hw+*oqs$-Ru+M&w>j?p5So 
zOYSY?-cIh{$i0W$`^kNb+<%k%8o6(fn?i1Da?{9thum~>dyw0k+#Pwp~umy^4S-0#R;OYSe^ZYFmtx!cIyN$x>%Pmx$$i*xljcLnF7H*)SC&fUkke{k*z&OODsXE^s9=U(C5>zsRw zbIm!|l5>4Im%+J(oZHE{gPhCbTrmaJDX2+7T?#ItAd!M33Yt)GJq0&Xa5DwBQE(^G zg6AmcLP3;*nG`IdUVhqwqco|4!k<6#j$4Cn$W5!WSrPDyr}m3SXzNC57!M>_Xu{3I|d65rxAk97*A5 z3dd16mBI`Pr%^bQ!Z{Q!q3}BjH&VER!d(;|qVOn%Cn!8kVLpY$6qZp~N#P$9Rf|#- zM^Pe0S5kBfMYmCOA4Pws=wXWfLD3TwJx9?C6g8#j6^dS`s3k@1DC$DdK#B%Y^btkF zDH=)9Xo|*BG?k(Zil$LClcG5kEurX7{4ZKhQ4U4hDcVobA&QPtbb_MO6ctfaOi>v{ zl@$HK`54Y8a{emL-^%&hIe$OrALRTaoPUh-|K$9CIR7H&U*i0$oPUG!T{-_b=VuZ< zzlHO=IDee;1)RUg1zf1Zg?KL1Sg9mJ3_Bu$2pYxUi242f1*B3&*)|mJ5ZVE?nS3DHkdzu0e5qim#;jYKm{7_*RPV zp!hC|@1^)riXW%=Ns6DQ_*sfyrnni!Z&Tcr;_eg=pm-3)?@{~##UD{Tn&NR3PoQ`b z#Z#jc&!l(}#mgyPPw`I_Z>4w}#XBk9L-9U}k5hb#;yjAaQd~%J1tqm9sZYt3lw3{8 zO_bb9$sLs3MajLCJW9#qlsrkv)08|*$;*^Pn^Dq^lJ=BzqogM#eJJVwr<0NoDfyU^ zPbe8h$yiFJP%?v(g_NwKWHlulDEXO^O_cmbNe(5yQ*wZk!;~DOeDmC{=%y@%5KDSeRAM<{)a(tlI>8l@?ecA&HqrM)TbN9jOH-=*|@ zN=H!oDWzj59Z%`!lx9-8kkaLpuBY@TO1FwC-A3t7O7~E@kJ96mo}x65(zBEnQd&V- z3}p={yN0ssD7%%iJ1D!0vU@3efU?Iady=xJDSMW(=P7%QvR0HuDC;uaF zl;z5XQ#O*a36xEuY$|0Llue^-K4mK?`+>6GD9fg7FJ=2FJ4D%0%1%&rjeiP+)QGPGw4^aLP<M|n5O2T?wl z@{cM1gz{08kEQ%G%Kwk@EXrq4KAZA+lz&C}YRZ42d^_bkDL+X05z3EKev0xu$}bQt zFQvSK@{3diDq^Ww-6%|xmq%u%hgUSRdFQ@V*DsQ3k9xCsr@VRrgSJKUEJ>^$1n}qUzsN{fDX-sd|a3 z|54SNst#23rK&$wL#XelIjf85K3)QX?s@*A6yIZJspHS`ZLbZp4YQ2PNgM?}$glZ#&YNLf} zs>KldXzY5j22-Wk1>Su-O zg+lcULiJLidWBH^qEI6UHEIYoY75aCu|kamp+YToh^&YE~C&))Zi$cx!gqoiUHAjnvn&X6;p9wX86>9DlYVH+k?iXqv6lz^2)VfZn^%tSmUxivX z3$ib+$z-hn^5Nw zq0YaAIxU1cZwqw>3U$T`buxrHSwfu|LY>({oq0l?g+iTgggPsPI^PP>I%|YF+l4wi zg*v;0I(vmW`-M7(ggR%0It4z97Ul6=Gi&Vp|Hat%TS%q9L}e5Zhje?I6VV6k>Y|vHgVD zfkNyUA$FV)J6?$WT!@`4#C{>f&J$u63bBiY*wsSp8X@)vA$GkGyHSY!Rfyd##O@Sg z_Xx53gxEqM_Ph{#L5M9CVk?B$i%}siPKc{3#MKw#t`y>~7UHfG;%*S)ZW7{d72@s{ z;_esX9u(pp5#nAF;$9KrUKQfr5aRwP#3c)H?S;7hLfoHq2MclU3vnZaxN$<<1R-vc z5I0ST`%;Ko{%718A#Rfpw?&A{7UH%Gal3@Ly+YhkA?~;kcS?xM6XJsqUtNf=A;i}f 
z;$wyQ1R=hO5PyXbf3*;Qoe+P65Py>pf1eQlfDr$X5dWwU|F{tUq!9nS5dWeO|B?`m ze^rQoM~III@##W*Cn3J85Z^KQ;1(J#Q!41 zZxZ5v6XJ7(_#Hz0ZXy1-5PwpL&lTd&2=N6%{COdvzL3yZNVr@yBwQsVTq`8pFC_e3 zNO(|4ctl8eOi1{rknmq2;bkG=RUx63kkDF4ND~s?5fajcgib<2Zy}+tkT5_<7$hW& z6B0fX5+(=_IZeOA90HN+6q3#l)?q;Fx z38C&ep>Cm2w?e3UQK%P$dNqW4wS{_#LcNAUy(@)!*9i5l7wX+8)Vp1%_cx*5Jwm7zn)P43Zed0Lj4nvv66WaM`uOW!DRr-6&jk zlWZCDl{A{G#nx{940gzAvF9{XgFSII7MhULuj~AXt+vfxLRoVgV1oj5N)_o zXt-HuxK(JlO=!4BXn06ycv@&!BqR}%stHN8g``*^DM3iOR7ko^NNOY`T_q&_RY>}q zko2gK^q7$Jl#uj{ko268^j{(AWg+P`A*qFs)K*C9E+q97lKP2;q=7=xyF$_jLefV< z(r_VZw2(AONSY=jEf$iN2ua@vNh^h<)k4w_LehF6X`_&oEhOy~l1>Uqr-h`mLQ1&T`4rWTWEA&RA}^fq0z%aqkjmEo)8*6B{X_QX!M-W z=mnusQ=w6^(5R)*C{<|GMrhPQXw*q)G*f7_RA{tJXtZ2tv{GoaPH41VXmn9%949nR z5E|DP8eb|jzFcU0rO@~lp>c}PIGQRnep_hVR%qN=Xxvq3JX>hIOlbU#(0HZL_*ZGBL5RD>KkXmknmC|b2RDbaNa*~siobv~q5qXv6f@4!rC0Hy$@?2z}m4` zI}U3nU~L}OPR81NtS!LW7qE6Y)^5YvgIN0u)(^z`2(V|_8!Z^Zg? ztgpcOy;#2w>knXk9oE-l{b8&>g7rtS{y5gRV0|mrpU3)3SbqiUyF*^X`cACx!p1wW zkz!*GHa>}si?DGEHr8Nc9X9@NjE&!6<0))Bjg4oqu@xIHVB=M6yoQZk7>L0P4Bmpl z+b}o~gBk__1|19{3=#}V3=YNMa14&Z;Ajkv!QdkpoQT0m7@UT|r!n{}2IDVb@Kp>J zVsHfpS7C4s2H(WsKQXuggQXZO!(aslw`1@V3|3*V27|R2{0xI9u{i^q2V(OeY@US8 zOR%{FoA+Sz=h$42&0k~lx7gh6|2>YIf57G+vH3hUU&7|=I4Mrfz{#07xff3EgOhK@ z$+zI-J8|-(IC(BkF2c#>IJprgw_{6e?SZWs*qVi{{jjw^wi?)KVrzh{0$U?&y%SsS zVr=W(*g6bbM_}uCY@LX$ldv@(TMMvt8n*rgTc5<%RoGgItw*u-Dz;nLJ{;R;WBXsR zeG#@7V*3hge-qn_v3(P^mt%VcwpU?$4Yu#c_T$)oVvOxqu)PaAE$j@ilVj%y>>P!i zqp@=gc0P=q+9_-PD3gW-i3UWVb77=8o8Z)3O^!|z~tGloks zT!!Ha3|C?JQw;CH@Lmjmf#Jg#Zo=>}41a~;6Bs^);nNsCi{Vxrhr65n1;f`c+=1Oa zu=_^r&cyE9v3me^8`z!1ZU?&&cKg`Pv0Gwy8oS3~_blvQj@|39dp&lSVD~2M{s6nT zVD~oc{usM=VfUxly$8GZGPe5&c6VU37eixLT#OIH_y~-T!uV*6kHPo^j8DV(42;jj z_-u?nh4E)F{sP7qVZ0FIuVH*8#{YrwH!!{qd@luSJVY~w4yD?si@&1ei7rpV!R9E*D(Q;S(xmH$pM&5U{c4ViAjV> zf=LgPAtv)MxfGM{VX_jF+cEhGW0O^wtifb0CJ$lq3rsd(@-QaPVDbtk+c4RV$xcjm zVJfD3W4bSEW23i0J}M&%yNEF{bBXdI6>v zVtNUtyS1-ix(L&6VfsT%Z^3jqrYkYM1JhNQuEBIIrW-N+6{df{bPJ|CF#Q{5V73Ql 
zZ^Udb%=X1>Kg`~WSqHNSvvGo1hFKr89J30uIhY-c*)f=%iP`y>U5eRtnB9fhLzw*% zv(1=2joGu9ZN=;b%>IPE3G7AKi?NqsZ-Bi5dn4=}jJ>0<_kQes7{|Tiu{RHUCu8qa z?45zVPhjsX>|Kk!+pu>Z_SR!>1NJszZxi;O#NKA?y^OtW*xQc1o!HxjeX&0S`!lhB z0QM)aU&nqE`)%xZu^(eUV{HE*?4N-BQ?Y+O_OHSIa_q0h{-fCcBliD>gMD$(#K9pr zn2UqMaqwOoyblLQ%IJg=Ii*Rr)4z9<+5**x$gXK6_iG$TRcpL}68ROuu zINTG5Z^Gd|ID9(}-+{x~I1F)E;IP8s92_2u!}s9uWE?KQ;rTec1cx`{a2XCC!{P67 z_zVtT#No?0+=j#LINXW(49sU@zBlHxFdy%a`2^+><~invVtzK}=V5+6=AXy>BFq!vZWOu!yjTaa?3r z46rD$sIWK=iv?Jmip3dNoQcKRSe%2!rC3~v#no6W!s1#iuE*kASloog_pw-t#WE~b zU~xAVYq7WwiwCe+hs6dgHe#^}i>DY{Y{gt`U{jscL*~PNN za)jkHmUFN?0?Q+@d_R^S#PUN}egw<;SbiMK)3N*nmScAj^^O#ARHZnqq#Ub2}d8r(Z_If3XV?4 z`J;1ibS{oQi=%(T(Zx9W0gi6P(Iy=I2B-JH={Mo@0H+Vb=_7IagMUBFV8$E%!3XXh Ky#EtUAN&81NU;_G diff --git a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist index f5c01eb1d0..6b093433a5 100644 --- a/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist +++ b/interface.xcodeproj/xcuserdata/philip.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist @@ -8,11 +8,11 @@ ignoreCount = "0" continueAfterRunningActions = "No" filePath = "field.cpp" - timestampString = "374955033.430214" + timestampString = "375986878.0086" startingColumnNumber = "9223372036854775807" endingColumnNumber = "9223372036854775807" - startingLineNumber = "98" - endingLineNumber = "98" + startingLineNumber = "122" + endingLineNumber = "122" landmarkName = "field_avg_neighbors(int index, glm::vec3 * result)" landmarkType = "7"> diff --git a/main.cpp b/main.cpp index f366efad3a..68b9244f27 100644 --- a/main.cpp +++ b/main.cpp @@ -93,7 +93,7 @@ ParticleSystem balls(0, Cloud cloud(100000, // Particles box, // Bounding Box - false // Wrap + false // Wrap ); float cubes_position[MAX_CUBES*3]; From 09f3c00b5a8ca4f4b91d1bda4200b616da45ad0f Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Wed, 12 Dec 2012 09:54:13 -0800 
Subject: [PATCH 026/136] Adding networking to send agent head rotations --- agent.cpp | 33 ++- agent.h | 4 +- cloud.cpp | 3 +- head.cpp | 206 +++++++++--------- head.h | 11 +- interface.xcodeproj/project.pbxproj | 4 +- .../UserInterfaceState.xcuserstate | Bin 104649 -> 76530 bytes main.cpp | 36 ++- network.cpp | 5 +- network.h | 4 +- 10 files changed, 172 insertions(+), 134 deletions(-) diff --git a/agent.cpp b/agent.cpp index 63aef1c6b4..093d06832a 100644 --- a/agent.cpp +++ b/agent.cpp @@ -8,13 +8,14 @@ #include #include "agent.h" +#include "head.h" // Structure to hold references to other agents that are nearby const int MAX_AGENTS = 100; struct AgentList { in_addr sin_addr; - glm::vec3 position; + Head head; } agents[MAX_AGENTS]; int num_agents = 0; @@ -23,8 +24,7 @@ int num_agents = 0; // void update_agents(char * data, int length) { std::string packet(data, length); - //std::string packet("127.0.0.1,"); - //std::cout << " Update Agents, string: " << packet << "\n"; + std::cout << " Update Agents, string: " << packet << "\n"; size_t spot; size_t start_spot = 0; spot = packet.find_first_of (",", 0); @@ -41,6 +41,29 @@ void update_agents(char * data, int length) { } } +void render_agents() { + for (int i = 0; i < num_agents; i++) { + glm::vec3 pos = agents[i].head.getPos(); + glPushMatrix(); + glTranslatef(pos.x, pos.y, pos.z); + agents[i].head.render(); + glPopMatrix(); + } +} + +// +// Update a single agent with data received from that agent's IP address +// +void update_agent(in_addr addr, char * data, int length) +{ + std::cout << "Looking for agent: " << inet_ntoa(addr) << "\n"; + for (int i = 0; i < num_agents; i++) { + if (agents[i].sin_addr.s_addr == addr.s_addr) { + std::cout << "Updating agent with: " << data << "\n"; + } + } +} + // // Look for an agent by it's IP number, add if it does not exist in local list // @@ -68,7 +91,7 @@ int add_agent(std::string * IP) { // // Broadcast data to all the other agents you are aware of, returns 1 for success // 
-int broadcast(int handle, char * data, int length) { +int broadcast_to_agents(int handle, char * data, int length) { sockaddr_in dest_address; dest_address.sin_family = AF_INET; dest_address.sin_port = htons( (unsigned short) UDP_PORT ); @@ -81,7 +104,7 @@ int broadcast(int handle, char * data, int length) { if (sent_bytes != length) { std::cout << "Broadcast packet fail!\n"; return 0; - } + } else std::cout << "Broadcasted Packet: " << data << "\n"; } return 1; } diff --git a/agent.h b/agent.h index ec3f42028b..596428cca1 100644 --- a/agent.h +++ b/agent.h @@ -19,6 +19,8 @@ void update_agents(char * data, int length); int add_agent(std::string * IP); -int broadcast(int handle, char * data, int length); +int broadcast_to_agents(int handle, char * data, int length); +void update_agent(in_addr addr, char * data, int length); +void render_agents(); #endif diff --git a/cloud.cpp b/cloud.cpp index 5e3a6ddd91..a0057d013d 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -90,7 +90,8 @@ void Cloud::simulate (float deltaTime) { field_interact(deltaTime, &particles[i].position, &particles[i].velocity, &particles[i].color, FIELD_COUPLE); // Update color to velocity - particles[i].color = glm::normalize(particles[i].velocity); + particles[i].color = (glm::normalize(particles[i].velocity)*0.5f); + particles[i].color += 0.5f; // Bounce or Wrap diff --git a/head.cpp b/head.cpp index 46dd25028b..12bfa66a97 100644 --- a/head.cpp +++ b/head.cpp @@ -28,6 +28,7 @@ const float DECAY = 0.1; Head::Head() { + position.x = position.y = position.z = 0; PupilSize = 0.10; interPupilDistance = 0.6; interBrowDistance = 0.75; @@ -52,7 +53,6 @@ Head::Head() void Head::reset() { - position = glm::vec3(0,0,0); Pitch = Yaw = Roll = 0; leanForward = leanSideways = 0; } @@ -167,123 +167,121 @@ void Head::render() int side = 0; glEnable(GL_DEPTH_TEST); + glTranslatef(leanSideways, 0.f, leanForward); + + glRotatef(Yaw/2.0, 0, 1, 0); + glRotatef(Pitch/2.0, 1, 0, 0); + glRotatef(Roll/2.0, 0, 0, 1); + + + // 
Overall scale of head + glScalef(1.5, 2.0, 2.0); + glColor3fv(skinColor); + + // Head + glutSolidSphere(1, 30, 30); + + // Ears glPushMatrix(); - glLoadIdentity(); - glTranslatef(0.f, 0.f, -7.f); - glTranslatef(leanSideways, 0.f, leanForward); - - glRotatef(Yaw/2.0, 0, 1, 0); - glRotatef(Pitch/2.0, 1, 0, 0); - glRotatef(Roll/2.0, 0, 0, 1); - - - // Overall scale of head - glScalef(1.5, 2.0, 2.0); - glColor3fv(skinColor); - - // Head - glutSolidSphere(1, 30, 30); - - // Ears - glPushMatrix(); - glTranslatef(1, 0, 0); - for(side = 0; side < 2; side++) - { - glPushMatrix(); - glScalef(0.5, 0.75, 1.0); - glutSolidSphere(0.5, 30, 30); - glPopMatrix(); - glTranslatef(-2, 0, 0); - } - glPopMatrix(); - - - // Eyebrows - glPushMatrix(); - glTranslatef(-interBrowDistance/2.0,0.4,0.45); - for(side = 0; side < 2; side++) - { - glColor3fv(browColor); - glPushMatrix(); - glTranslatef(0, 0.4, 0); - glRotatef(EyebrowPitch[side]/2.0, 1, 0, 0); - glRotatef(EyebrowRoll[side]/2.0, 0, 0, 1); - glScalef(browWidth, browThickness, 1); - glutSolidCube(0.5); - glPopMatrix(); - glTranslatef(interBrowDistance, 0, 0); - } - glPopMatrix(); - - - // Mouth - glPushMatrix(); - glTranslatef(0,-0.3,0.75); - glColor3fv(mouthColor); - glRotatef(MouthPitch, 1, 0, 0); - glRotatef(MouthYaw, 0, 0, 1); - glScalef(MouthWidth, MouthHeight, 1); - glutSolidCube(0.5); - glPopMatrix(); - - glTranslatef(0, 1.0, 0); - - - glTranslatef(-interPupilDistance/2.0,-0.68,0.7); - // Right Eye - glRotatef(-10, 1, 0, 0); - glColor3fv(eyeColor); - glPushMatrix(); + glTranslatef(1, 0, 0); + for(side = 0; side < 2; side++) { - glTranslatef(interPupilDistance/10.0, 0, 0.05); - glRotatef(20, 0, 0, 1); - glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); - glutSolidSphere(0.25, 30, 30); + glPushMatrix(); + glScalef(0.5, 0.75, 1.0); + glutSolidSphere(0.5, 30, 30); + glPopMatrix(); + glTranslatef(-2, 0, 0); } - glPopMatrix(); - // Right Pupil - glPushMatrix(); - glRotatef(EyeballPitch[1], 1, 0, 0); - glRotatef(EyeballYaw[1] + 
PupilConverge, 0, 1, 0); - glTranslatef(0,0,.25); - glColor3f(0,0,0); - glutSolidSphere(PupilSize, 15, 15); - glPopMatrix(); - // Left Eye - glColor3fv(eyeColor); - glTranslatef(interPupilDistance, 0, 0); - glPushMatrix(); - { - glTranslatef(-interPupilDistance/10.0, 0, .05); - glRotatef(-20, 0, 0, 1); - glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); - glutSolidSphere(0.25, 30, 30); - } - glPopMatrix(); - // Left Pupil - glPushMatrix(); - glRotatef(EyeballPitch[0], 1, 0, 0); - glRotatef(EyeballYaw[0] - PupilConverge, 0, 1, 0); - glTranslatef(0,0,.25); - glColor3f(0,0,0); - glutSolidSphere(PupilSize, 15, 15); - glPopMatrix(); - - glPopMatrix(); -} + + // Eyebrows + glPushMatrix(); + glTranslatef(-interBrowDistance/2.0,0.4,0.45); + for(side = 0; side < 2; side++) + { + glColor3fv(browColor); + glPushMatrix(); + glTranslatef(0, 0.4, 0); + glRotatef(EyebrowPitch[side]/2.0, 1, 0, 0); + glRotatef(EyebrowRoll[side]/2.0, 0, 0, 1); + glScalef(browWidth, browThickness, 1); + glutSolidCube(0.5); + glPopMatrix(); + glTranslatef(interBrowDistance, 0, 0); + } + glPopMatrix(); + + + // Mouth + glPushMatrix(); + glTranslatef(0,-0.3,0.75); + glColor3fv(mouthColor); + glRotatef(MouthPitch, 1, 0, 0); + glRotatef(MouthYaw, 0, 0, 1); + glScalef(MouthWidth, MouthHeight, 1); + glutSolidCube(0.5); + glPopMatrix(); + + glTranslatef(0, 1.0, 0); + + + glTranslatef(-interPupilDistance/2.0,-0.68,0.7); + // Right Eye + glRotatef(-10, 1, 0, 0); + glColor3fv(eyeColor); + glPushMatrix(); + { + glTranslatef(interPupilDistance/10.0, 0, 0.05); + glRotatef(20, 0, 0, 1); + glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); + glutSolidSphere(0.25, 30, 30); + } + glPopMatrix(); + // Right Pupil + glPushMatrix(); + glRotatef(EyeballPitch[1], 1, 0, 0); + glRotatef(EyeballYaw[1] + PupilConverge, 0, 1, 0); + glTranslatef(0,0,.25); + glColor3f(0,0,0); + glutSolidSphere(PupilSize, 15, 15); + glPopMatrix(); + // Left Eye + glColor3fv(eyeColor); + glTranslatef(interPupilDistance, 0, 0); + 
glPushMatrix(); + { + glTranslatef(-interPupilDistance/10.0, 0, .05); + glRotatef(-20, 0, 0, 1); + glScalef(EyeballScaleX, EyeballScaleY, EyeballScaleZ); + glutSolidSphere(0.25, 30, 30); + } + glPopMatrix(); + // Left Pupil + glPushMatrix(); + glRotatef(EyeballPitch[0], 1, 0, 0); + glRotatef(EyeballYaw[0] - PupilConverge, 0, 1, 0); + glTranslatef(0,0,.25); + glColor3f(0,0,0); + glutSolidSphere(PupilSize, 15, 15); + glPopMatrix(); + } // Transmit data to agents requesting it -int Head::transmit(char* data) +int Head::getBroadcastData(char* data) { // Copy data for transmission to the buffer, return length of data - sprintf(data, "%f6.2", Pitch); + sprintf(data, "H%f,%f,%f,%f,%f,%f", Pitch, Yaw, Roll, position.x, position.y, position.z); return strlen(data); } +void Head::recvBroadcastData(char * data, int size) +{ + sscanf(data, "H%f,%f,%f,%f,%f,%f", &Pitch, &Yaw, &Roll, &position.x, &position.y, &position.z); +} + void Head::SetNewHeadTarget(float pitch, float yaw) { PitchTarget = pitch; diff --git a/head.h b/head.h index b0182cd498..f3938fc396 100644 --- a/head.h +++ b/head.h @@ -58,16 +58,19 @@ public: void setNoise (float mag) { noise = mag; } void setPitch(float p) {Pitch = p; } void setYaw(float y) {Yaw = y; } - void SetRoll(float r) {Roll = r; }; + void setRoll(float r) {Roll = r; }; void addPitch(float p) {Pitch -= p; } void addYaw(float y){Yaw -= y; } void addRoll(float r){Roll += r; } void addLean(float x, float z); - void getPitch(float); + float getPitch() {return Pitch;} + float getRoll() {return Roll;} + float getYaw() {return Yaw;} void render(); void simulate(float); - int transmit(char*); - void receive(float); + // Send and receive network data + int getBroadcastData(char* data); + void recvBroadcastData(char * data, int size); void SetNewHeadTarget(float, float); glm::vec3 getPos() { return position; }; void setPos(glm::vec3 newpos) { position = newpos; }; diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 
4a2816dfcd..d6e73ccf53 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -302,7 +302,7 @@ "$(OTHER_CFLAGS)", ); PRODUCT_NAME = interface; - SDKROOT = macosx10.7; + SDKROOT = macosx; }; name = Debug; }; @@ -327,7 +327,7 @@ "$(OTHER_CFLAGS)", ); PRODUCT_NAME = interface; - SDKROOT = macosx10.7; + SDKROOT = macosx; }; name = Release; }; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 752de4bf0043e4b1c6967cc4d4c515c7947f5a6f..ddda5ce8a0d2fa0e0c02813fdf7f72dd3a346f74 100644 GIT binary patch delta 26961 zcmZ@<2Y3@lx7D^_N!xej-Wy1^+5^VEEA9bMKimGk5Op&d5hMe)04iK1YEoY84~r+H^vR z3md!M!;n{-)c9mX*H@M(>nBT+CCmEDhR9N7S+YXeFxhaKMpiDXkk!cQWMgFYvWc=u zvdOY3vKg}1vbVAia!me(TqdXFoLrP!%kAV&a#y*hTq#$}{p3ONP+)OjU*x~ZAIhJ|U&vp} z-^xFb82JS$BPo(2MbetIBb`WB(vwt@YSNDkB16eYvNPF*j3*PwK4cP^Lh1&SX=EWe zj2upCNG(}J7Ly~$5^^+IM~)%u$+4u4)RTamNzNi?lXJ)xauKHhG79LOvy*kPsb3{iq}=nd(oaP}x)tl}nAF zN~lt*jv7PNQ)4L|rKbQjj+#Zyrshy{sRjyC^Qie$E47STPOYalP#dXlsJ+xaYCm;= z`j+~RI!GO&PEn_+Gt@Oocb&RH-K1_&x2ZeSUFs?IjCxMJpb;9SF&d{?nxlDIpq*%E z+J$zd-Dr2(gZ88Y=s-G%4yNPiE_7GAADu)e)BWizI-Aa+bLkOu30+E;(PQX(dMrJS zo=(r8XVQ&y6WvU=(5vY+^jdlyy^Y>Z@6gdZ>BIE*^bz_feU3g)|3v>x-=^=-cj;f~ zXY_OW1^tpi8H~Xhg5emR5g3tiW?UFo#*Oi1{1|^Gfa%P{FtJP=)0atP`Z4KD29wET zFdY#N)+X0thLK3l*RvBhjDTg}$6quFunWOh0`N5?K; zo7ko7a&`^-6}ySu%I;?OvxnKE>o-em8x57;N{3-%B81BY=^j^PxX4d=wU za-BFO=gS3iAzTC(%XQ@vxV~Hpm&&Da>0BO{&lPZm+%T@3tKcfRDz1*3$W7uVb5ppf z+%#@FH=kR;=?vULZW*_nYvWdN+qmuA4sIv6i`&iZ=f2|(a)-FX-2b@C+;#2-cZa*n z{l?wr9&wMk=iCeK9rvF5z)N_9m+~@R&I`QAD|k!Zk$2*q`A)o&SMdRSARojB^C5g^ zK8BCw3rKR=L9~8M`-!>YP%%%;7YoEfahNz<)QH7mnOGs#iX+8Y;%sq_I9F^C zp*T;RFD?)b;zF@mTp}(LSBR^{b>bFrtGJDQEN&MMiigC*;`iba@wj+GJR_bJFN#;i z$70_n;#2XN_*{G;z7$`Huf_M`2L++{LP08cg`lueI4P70mBLREr0A@OQN$|Z6g?EZ z6@3)_6+;v`id@A|g;r6dC{|P{sueY`k%~p}H}Dd*&0q}zWKLqgm0E+=w 
z%%*8{5@La#0Jt~6`)<0)9JFX~bA*|lHg6hfbN1us-J3k!KHzo;i_VvL8h_A6eUkpD zKE?F&ziieOZX7Pa9M{qC^&cj(xmlQ{rk0rCu>TVVF$TU3W&edK!f zK!=bHp&i0D1!=}07(kwH+NzC3rKcrQNugv|hd zeNhVnma03V78dZ-2v2x&gf(PJ7=(sSB>@NrZG2Ryg$T!%bV97)?zTYG!WKSh>w{W2 zz?c=msD(4kEsa6kU~8!w@q{Nz0}v(r;HyS0yrF+tIN}TSEB#Q50Jx~k9SMfWDtmaf zjDiQs9H7H0vV%pW-X@#1gK_>=(AQs%T6Bgn<-tfCoVH4hT6BZ0{@$oX4|u{q0JZ1^ zeJX6Ba|H#73Oks&nnW##O}yiwyX_A^^#T{*|7{ zP^b^|LoEv6k~QwA#c;TxvJ+ASk)VzpEJ}<5?4f^^4Ro%OBjqq|ZAa9i3f2dyP>Wg! zs@##$@OY3vYEci7U@z1{4}F4_sKt2bUmb)@g0q61P>ZQ>eQ+3RF#{f`_C{vIht=Ll z1N8gS6}6ZTV`>7Cg|Ioq1+{2~dqbR2i^cGIh#P9r3a``zAZ?JXbwgIcj8GM7u@>gk z`XFDy*|lEC26*x-H`HPiyk6@GuhkMTFieSB?0{Ke9;n4`m^so5*#|d<`J)!!!c$*+ zqZWtYhp&~W#Ss`C9*SBVgNdVjkmIm^lnOZoPlkJ<7H8qfQIW_`&}D-!YH<;s9AyXf z5l*PZ71$c#i&|WV`!;x^7PlHA15k^*&}CyFYH<(7)p;Nf;Ot00)Z!65V1%B+AM1jV z7tkw8iCVmd*P}&f>+XbF{0XN=`J)!^;oc}8REofdQNE}YhfdL6sI&v@6WtTK)?320 z(Ltz`g4d%1Q7H?5ulGg-=)2hqm0H5m&hDtx296l(j@ZMUo&8X$6V&N!pl-_-P#z;k zrLOSt*Z{->ChMG_Him*OIu}@`qY*Et(|I92aQ_xHD)ontw)91%K`=RX04fcIr+PR+ zeK$*ZIaYy6BjEL|15s%-eB9k=Rlrhr1uBh&NqfzgAM{eVZ<{SD?Fz4N4T8*e7gXB) zGdik^8!AnJ$+3Qr-G;+`+nrHqA9!PX3@Yshx#MOEO;-_>_J?y*&6i7eSfSE^P~XdR z+0czer9&_miG#5VZu7;|3dsLbaohSA{hQZdIPB3Q@1NX+; zpi(W|II$1hFp+@qlRV({gfHOri3CjRPNC8=7&oa4QVHKDsNsd~3RGGHA9qjaART2) z?NGR}dnXt>*$Np0^*#EbQXM?iqZcY22lc&_P~Te)JMU#t=|q^ccQ7iQ0`)!A@Lo?> z*f1p!c200arL$ntUNxMaphBf{8&aL%$%!o7JJkxA2j3@jL8S)hyFUt*HbH%_Zm4t- zWRsns^8pSXpXPupg~)+8RJz<`eB46;r}wr-r7Pj%?m_TEZv`q{10VNJK&4;88{3`W z;~q4OogpCW;gZ?rt2KQ*Q0X@?x0@4u)F%LyZh`D~g{X8pbnX`b_w;o{rMuwVxk|`D zIeh<}1Kc#z3qI~Ah5ZgXq0$2;(hYM3ICB<_9E2MWrlHdBp>w|^RQf-deJBN${s@=M zPC-t>M_Zgsj(wBtI!MnLN39R!`m13}k|!!X4>um}g-S0#Hn|Tfy$rLGQ&H(PICpM; z!M0E|guwlS ztx=gZ{sqv_+Bjk>ULwzcP z%2aS}YHw7ghS`Ud@NqvG^vfsU!$y0=4?3S5h{^)tw6tEREChbt)Exf)DsOPm z^0EQ4LHejx*+78qZIulM=swub*)C9)u8&+M%aCOPbU#23o{>l;GKpM5%5r3R`mklP zT-i{79suaK%OuBS1^N(xeg}bP-~d^XtmOaUWk&pvTkQBUiSdu|zA2a7l~w7pTV>S% z{r;@vH;GhoU-Cd!D|sjzB^wRUqxu|x{tuu>ve{Vpw5C;Jj6Eb-O>^dvw}1N1CF&ja*lfL;XXWq@7<=yibJ1n6ym 
z-UaBd0R0W14*>cQppOCi6rj%m`Vye80s01@ZvpxqU=o0#0K)^Fct1lUu6y#&}BfV~Ge z3L@|?04@VK4R9XdmH@W}xC6jl0PX?sPR2>ek>pB-%HBxw^|k;#r8nhDp=6lsJ=}1n zyQLg417|)1azbxo7^^^{VZvEnlv|kbbL}_@OV4JBa>fk&)D8&n#@Vhma!WICp&hU_ zm-e_1YN$OYh;j$B$E9`;7dZD^4^i%J2ClRNo#2hL{?Oq(FUq~l;I($p*93OD#EbF( zGkBvN41pUj#lTIPd+lyXu>5C#IR7VBDo-(k_u-t2{xIUQt0*60w*21io&h&r3WDP<*-7QuX7CZ5 zb1?v}xZ)y}=bJ50OqT0Ex!A}xX3Mj7f)PgHT?{~hL_nLXoQ=H9?DC@BrON2i&FJC_ z6FCZYy2eT6wPv?hChn-8T`c8e%$7ge1NG*K?=jDBcG=fhJM28i;7CICa)ffiVHIo3wL#0+5V!1BNJ^YL8* zT3jJT`AV}J(eAd^*ep!F!in;)%wUIha3kD!HwccnXe-J$n?Y$ixE=Pr768Axz>4x+ zW>DS^?laA5=~Y&ge`^M*cJMGPyBy$Jkelx_{PX&fA2qu%?QTE9@}J{G`AIXtwF75i z-)mh&`A=p*Xa_Et()sK)UX))ogNksv z<6v{T%)TXskFGn2@@Hl@yLPu%(D}wfQEo00m_s}8-Xy8?>KCqgWo4gxOPY%Y=JeT- zB=k1W-zC7XL4gQRdy}*w&4mMVY4;(GVnR)eZr4pwB+V5Bb8Ghy-~m1cZobVrwht|7 zt|ypByPGXcx|J!C<}!kHYzJK6fm^*r(p*L`WjoNxIC{#w;?nBB&KhYhDVSHgqpwk8 zU-N)%zHM(qnhOi2Zg&YWy2O~ep}1o&lIH4y`L=sR!}vREMbcbUF#mR-n{hY|Q7(wo z5Nm@-Mbcbeu)ubg-lh?sewP=?MDsiaw}buR#=D6kInWG*wgaiK?6QAMb(N;roRr2V z3rjMC%+*J=k(p!`nN8*ZEF54F0E+}zR2w;z%#*0e0)RyWtg~r1AQrBFGFsSLv2#LZ zBbHGY;O8IW?d%`w7X;Z~ML)8PtTvLBlNDqoSp~2z0P6~{ZUBpKBWuW7iJBY*uG0XTj(+3{a;-6NCAo@RO|AjhAb<@9*bsoF zwvp?|FU>O30G9q)=4jaGH(Nh)8@byUxSiZV?j&~sEE8Z^0LungP8+$0+$&L&`vH~< zu%V{FJX4G8eHTCSf8>e(Z1E&{%G_cBzzRRMn0wz&NB&G+F$VqX)LbR6Nz~*GfDH$j z#uTJ=i`!apsPRg)OKP2;x3j-rU~v7Ax{%;tZ)d;y&re?DFXRIw>Az0V@8m;?ntTkf zVt|eK!o*tA@W9=Vd`bTKpZ$JIzLTiQ4*)9zSh*>t0uFef^rHw$_FpYga*C9wDH>o^ z0IN0y)xesk?tYX=*%*WVZE7f6%FaAcBLOzb6x0*aPwaG*E7j2$^lynkb)u9KHRT1c zI)IHa1=X7#X!-78k|D-{3JDLYA5s?_81}buqWq~4eN-D2Km}4kR4~AF0Mi2u0Bl?v z6-tGfg^UN-1V9+K$&9%?8g_eT<41L+jIRV#H!7a$PW1rTB!Eo@*c5>x2k3qAZ+xby>)bRfd(okA+&>Vox{TPJ&Zs$jp zQ#D3;6;vfvMO6a~0&E_@<^ya&8&ykvfqsGQtBg8FT{H?gPyIywOkDuj7JzL9*fxM|Z=)_zmnCZID!_IC zY^RA2*=164=TRpe^(*zeQOUpN@m&3zDjkz4; z_xD_WMZGn0{_9x2quxu@v;<%W0QRjpm%oEIo_U1RU(oXZQbLn7B~jB1zzzZIut~}H zMkQ%)PCow?#yofOqZPERKB|qjq^)Rc+6G`p0ro$D9Rt`8ZL}S2FHzHu0Q(VO$4${E zOarj}v8RsiM63U6Drg_tSE8o<0d@*tr%i!pj8$*XnaPzF8Fit-VaCE26ckXOS{D`+ 
z7Vx)GphM{>Bk{kL7CM^lY@TD;Ie?uvG53UPUpVUMc)Hhr%IQt_k*MiJfc*@x3#Oop zCOJb5l4<7o2n(xEtMd;v7GeL;_WYm+(5e5)nntHf)O04mE(46cVxqljs_h9^hhBzD zf42*#^Jvq;|F?9dHMCZuri%e~9bh+1(Kn6h`mkcfgcs2Bm7|WXplkngAV$)o%mZ;7 zV0X;p;1;(v>s9%l<==kn(qH?brzierKPJ(WC2D#qz{inY9^a6>RUI?)J0DE8}{T&LgJNnU!>E*_tf6H;Yjb0&9)2jgX z2w;y*K~JFewaSnFivH$5TiQf#mZ<5i0DA_o=cb?+@Zf8uAHADC@Sj27(%(tc^dW$~ z0@!O)&>zs@50xK%j6V6F@=npGC2IOC!2Sf-TT{?GSo=pO9et6$W(@k*X}(V1kf`Zf z0Q&%Ni7^OAjMMz`p4TRS^8-LYSV(fiE$ z8O#iksF^f?I|AIvB*)pf>UP=Be~_I~7Z4Z_;_V#Z=VvUDf&M`ue@hJ}hbj0^;zDMa zM9pXbKGYT9ZYE}TQ(5}j(dK@7U09&8#`y;X)~D9_2L%584#bo)RYu}}Tb)cbQ)4PQ zxF^6ne(a|IJG(wiJu}W2^sm`7o|zy~Gm`+W1h~p1#>uhAhl%7yAXIlJG@qJV|aoh(Xj4=RAkH>5qMBL zz7alr*I;FOCu249bpz-2e|W@7z9M_A>jJ{mjDo z^@zPa9uDvbqvjOjq--ib&mB80(MpP#L(E}A9D+E1!F$xt!1DoK2=L)wvmDE_0xPl#){?bityvq^mbGK; zSqIh;;97u>0C*L^NBRML48ZjOp8)V_0G|o)SpaVU_&k8m2YA!xd}3Wp@8WnrLpgy6 zhNn29ma?8^Te8^}Nf>SBtp+^BXp5DyerA_}W|tpLF2QEoV6)Bg3u9P>*_LXyWt(g< zW?Q=1w%TOtX0~OTZBIMC{o~hE^%kDG;w0b?I zuNmx2!&@n0i?g%X*#=t~5}{)o*m>>B<^#O?J$AsBmI{;fRFws{{~ji`kDCaO#B1?%D=_PPqy=K zXLq*C-v#jcPyBn0{QCet_Mi0Mv3fSxM6Uz5t`gvb|H^;F$WOHMA7hP`<>Txg2RQh| zf6B;z8sOvp$$y^Jvk@l#@h17Hf2F@bJ2vWY+4$p1S#o}I`(Vjmk^NJP-#6#$>YqNgQN_9^>}eLgrb#V0?vDi`2WyL6D4 zli?-%Dl;*qZ*^7f(Bgt_xmCH#*_Z5VN$H@(6ysI9#y{D2?L+V$;L|@!Z4jY4}inZE7d-v9rxnYpWFY3G=DD87#aZZ1+82V zzzxRrtK-6q!omT*@Snn>Sv{L+64q!EHta7+z;!X|lC`%R&-M7+F4q&_&7b<++eqIB z;4S~8H}-p|-qW}S%^laYvkl}18>J2c_~KS>2*8&ZDrv;Up3CHN^ij*WEG`@1tpH!P zjLYSQ0(?2Z+YB3MB-fMEa3l0l%Q-Dq#1#X4CBRn!eD!j!ge&FB0KNv`-vazQLnwok z^yF&vk!@TpHtIz6s!)4I5a*sSDS{E&ey(CEQZ372sO|z762p z-C`b_j*2CD5@!!o;{0(Xa4P{mbsNVa8Z?eu!)?$ngX>$opDJ@+-Y-XwD;z;^-s zD}e6?_@0&AMug=yahnkqVFA8Zp9JuI06(Bl(kB_N3y6#1vVct1aeE|EZZEgbxX!9H zBMOWUE{AUo{oVlZ{ifB%9pJt-FRX%P+yO~xHp}}5_=fuW2RP?eIR`}q2RJA7Wqu8CZi#d+FdhGM zuedk*uw~q9?hk;U0{H1=+@IWAfS&>Q*@3m1{Ho!Dh8Jjt4X<*GoiN8VgKn{t&8PmK zMu6lo{)@kJ@Ev#yfS(8WPlnD`h@Cx8@vJ^<8BZHqxnOLC=Xf6A7Xg0B;B1Y!*z?xB z-9J3+c?W=B0r=IA9Im{_KU(qRI|BSVz;Ar?@aBDu9BSSN;I{yNdl~P?`vd$A!0#IT 
zt&mRkd>9{T^awZV_|>E%ijM~PJ%Ima)M1_eahmzA`pC6>H$Gmrp6|i;lzq+j;(N=! z;`;*p0l*&`kM8lu0Do#c#>bxn{H5_oAAkLM^7th4P`97G@&i5}QQGi>&FG`g&**&m z-w&twEVIXxc8{T-PiFW6GuVDE%WF)a@hF-vHiPXavV7U+vpc@h48Cfot^Iru$&WUJ z?MJfw*uNbZ;pwJ#2p;g$^iiw%ar}6G0zZ+T#82j@@KXW)C&1qV{2jpG1N;LZB!EBw zfv%RQ`5F97iCV(&Q~0?OcjGG+fdRs}2cXAHPK2H{l-MJ7hHvbUWrj*SBu1Cb+9Y&~ zAFL_KFRdL=T3S3bw<4i(kfu^Iw74LfwX8GluLlO#dpp z|I-qw;3l{WL|Q_AK}nUSP*YF=2p>SGhm6S8l=$S8m6>iTLMK6KSmKO0Vao)SVUaW9 z-pdrHHaYtZN-3?bFx^Q^R)3QOR>^&Q0uqw+*{wn{AR-MNJrU z42T#&!~!C21;PrMLKeabIr=0(bOA&+rs(!bqc~77#sJg;9X$Y5Y1{kWjCWTqcYabbv?zL}GiN#|ablVatT^ z!URC{0z~g+!X#lbAo>8JF9e=0QJI5sD>TL@?8-r!f?BuOu^UWxwRVD8!rXrlGzbt7 z{Q!|<*xM0t%n}v~jeq;fnb0CEZkMH}0I47J3L>3^j0g>~0v6xrLr^}CP z;M3(b6Po*%j%p>8k!Pyn@ z!i6W2QsXC;)C@cmo*TY$MLe-(!VAMXSHxc@ycXWH&)=Vb82)Mg-W%udgD3%n1`wYs zGbv)C@qyEdZefL{XAk>}cn|r<6#E#;2dZyYnXZ5pJ=i!~X7WDO!q7`l!{S zm1r&6h_<4gXfHa5j({iyL>VB;0Z{>nNueq0C9L*of09C-eQ0lp^sW628uyquoxnSieX|nAVvaW6d*XGT{lyeJ zYMcs)X~uFhBb(jk?2Skfov4*a5qGgDBQd3aZppBMuBFu_Re+doN)2&@Sd#W}(5swXfS76auN14YS?=>YyJ@#4rJ!oLSS{8VUn%|{WLT|6!gS(jae_W- zjaVm+5$naVqE6I{KpZEI2gDpe%mqXPARr*-0b)KN768Hkh=pq;YH^Y{S)3wKi_^sE z616zfbboIIL=zxdOrIA4VzKGNl5E!6INTK#r4`*Z#Z?6rnv!AhCB~|kpOs+9_CfaO z#740R5Y0=?J!ugaWqj_3sS7RQVoB-1#FUhRD%0$Ujbf|W-}qhM17h2!X%id8Ri6Q| zQCw@h_*CjF#jnJT`l#jN*W!9{10Y%fu?!H)my6Ul#yJB-n`yEPFMN^o&f-pSpFXNh z+$HW7_lSD|u@Vrg0I?bnYud#961DiP_#GhDn!5NEAig#i!jyvIg1oANd;|1Dq7CEx z5QW{R)=BYy;t%?;R`D1h*0qX10^&=oxOYyrepKx|tj-jk@s`{D!R z5)vN)V!KK84nXWTelR`(AIwVV7H`-!ok?A&fKmeAiizKTofMqsO1V*L2V@b4iJ8T*tcB4 z8Git=&-`~tQblQgbzW8Fr#MpKt?+4Y&KD4eK0%g>07c;623--Nh%mO{7^(yKi9K61HYpkk0>Fd$9<;xr)6eTpX)X^PChS8+v_A{!7V0ddNJ1|uFr6#0r_ z|L_>D&;a5LAkO~%O~Ea8ivI8S2t|pa{2#Fuib_D72gFYXT`*#=`~0Yx%`%B8y$ePa z(7Z~H93x)KsJ+$$R%Vextv@&UOj_2fqK2>Czq2l6<1iabmHMCvY* zSIFz+E%Gk;jg+0;cO%u&30#dvd!#jb}joQ`!&0b-N7Lo z&9NNMiJSxH#Esxab7Q!%oSvJ?P3O*Yx41jpFWf!uDfgW3!uR8o`4oO2pT+0!8~MHb ze*RnjApawOLf{1_!A0;Ed<31J5Fi8zAwrzcRfrdQ2nj-Op-xySoD*>|NK6tl#cXk? 
zm@f_!HDbLuNt`0i6laTb#l_-MafP@_TqAxhZV@``v@d?vnD5Q+{8(zr5> zD^Ovha8-CIA{23o1Vw+va7C$Nx?-MUfud2-tW$ic*s9pC_(5@8aZ+(wan{nq(#JB= zGTO46Wp~S-mc1;KEmJH9S`N0%wk))qZV4^dTW+`9X}QO8pXCph$1N{eUa`DpdBgIS zaNuzt0&fSYs%Wv z+S*!et@E|^w+^%pwhpxpw~n-qwvMrmv+iZx$2#A7gmtO)RO<%odDa`OcUbSTzG;2m z`ga?djbNj&@v{lHiL@DIGtOp$%^{oPHYaTvTWece+hE(ywq0%e*cRJX+Ctkkwp(qt z+wQR4Y5TqHQQOnDXKl~h{%m{E_Ll7(+h1(&+1|Ia)7f>mtF~Kix5w^(c0bsiusdaU z*6zIBZ+6e@Uf8{{du#XJUT#m>^Y)^>rM;cKgT0fz+CJ7k!G4ha5c^E~Z2MgMLi^$N zTKh`-G4^Baf&F;9EJ)S4S(y?v6E%4US73TOHdR zS30h7T<5sY@rdL998Wl&ay;XB-SMX5FOK&d?>jzreCqhz33XC9IXZbcsht9xf}BE} zBAue0Vw`$9G}&pY({!hqPUoDiI^EYf{qFSK>7~=rY=6yvszFNiMTp=DI9%X>(cWvf5>>%a<-+yKHdz#$}hw9+wL)H(hSK z%3VcQOV>cxC|6x)*HqV`uKBLBTn(;`t`}Tyy54pp+-Nt}&C4y!Ey69*Ey^vyt+(4C zw;^t6ZW(S_ZaHp6ZX?`E-OAl6-D=!cx}9>DxcjF{iypf_ml3Y-OsvTaKGe!#r?kfdyfttqKBo2ozBC-!^y+l!_%XahrdT( zk3k+oJkmWfJxV;vJSse@JZd}+c^vn+Fc;fNQNu|B1f@={q?C-3QwmCj(n@Kgv{O1Los=$0H>HQNqf)8#QmU1{N`Gab zGFTa^3|B@fqm?nrIAvF5yt0QfLD^f`SJ_XQtV~f3R1Q|AD$|vj%4}t>GEZ5c9H!JL zio`P zE4L|kD)%V&E5B17RvuOUpgf^GtvsjvS$Ro$Re3{sTltIfH|6ij$I55Qm&!ksZM>gXN>!^Gt*Te)RpV8YR8v(mRI^nLs`;vgs%F(vhcQN3Z){&%IuH^WJve4&LG3UA((_7kgKG*LpAXZuMU7eZc#F-amN1QcKjR+FKp0 z4pompZQey)CAermr#e%XGxeq;S6 z`%U#*>9^i*qu*)2OMX}Up8LJ?``~Zq@9yvEpXfi>Kh+=jPxGJQzuJGJ|0e%4{+Io) z1`q*s02|;H5EKv+Fd!f^AUmL57cePcO2Dds4FTT-oD8@Sa4FzzAQ9Lh&?Qh6=p8s9 zFf%Ya5Cl#OoDukC;FiE`fj0tw3w#iS2FZh{ps1kupdLY*po*ZXpanrof?9)i27MQF zDCl9(tDrxExnSF1`{2OfsNl}QLxOXI^Ma=b&kJ4m4>AY*5(busLB3VcWwFgnbwGJnUWAhj5Q@pK!nM zl<!BMb<|yiCi7IHu7HN)5zyhfl*OWouh_Fl}A-Z&5v3f zwKVG6sAEw-M*SM~BS-99UT+hGrCuFQFK*wP4wdERncpre~3O8{ZsUt&RAz1 z(b==JZ)g9`13PDR>CN`K!+BJHLs+Vu+aTG5unaV;04%j9DG?Ys{0FXR&dyy<+>s zE{tuBT^{=+_D$^DxE^syasA__#LbO^ai`-h#a-#*)TL7wRhNP;C0)w8Z0@qB%f7B1 zy0TsQu1Q@}yQX(-?%LLMW!GC>A9Q`#&AVH0w@_WTQQgLMo6v1{w?o~&@Ae_yB3>5X zHNH=LV*IRlLwsZWnfS}`SG&7+S9kaAUe;F{o`Ff5@gp(0^z!lr~R39ox0 zy|7+gd-ds+*h@F7m!Vfw1IUS9|}``(7WvKH+^L`;6~1 zqtC29=lfjibEB_SU+2EAeJlHp={vUXp}xoao=mh&bW8L|EJ>_Q9F@2y@o?giepdaQ 
z`?>bZ>sQ>bq~FGVJNxZUB9jzJR!P}O!;`d0>yx%8?M!BqZIbPhb9KquLI#BmsvV>sG;Yv=LH`@{ z!(fZS++bmF%HWK_S%aGfFCV;O@a@66-v>V$;y)x}NYs!?LuL<|JLJlcyF-3WRi*}{ z2Bnsz)~D)H^{F6rR_dJ8=F~;0OHx}?m#3~w-I%&LbzADr)IF*DQy-_1X*OwgX^v^m zX>MtrX})RxX(4IhX;EnjX}!~u()yUhmR6NklUA2jpQcNjkTxl8YFdLX z?d!B{X*<$(r|nBSnRX`aT-wiRm(n|?`=v*x$E0^pPe|{bo|rx`eMowGdRBU2`txsHvN5uBm>JJGUyC8gUhhZ zu+Q+%2+9b}h{%Y}h|TDl(LEy}qfbV^j3F7iw2X?3x{Ue^L&nmKWf^-jzRx(C@i^m; zj6XB2GhH&>GCODX$V|wboH-}6A@fY;<;<&D4q2XAowB-SC1v%`O350Km7A59RgzVf zRgqPdRg*O;YeLrKtZ7*@v*u*Mtnad}Wj)AxnDr#kt5_Na_n;)bDVSBa#T6$9KW2voFO?`IiqvN=S<9*k~2MLVNO%ds+=`B>vF!z zS)a2pXH(ApoNsdu<$RxWH0Ot$<2gU&T*$eYb1O%8C+AJhyIe^wmitAnG?&a}a`{|E zu63?Uu3K(oZnxa-xjDI-+@jpcxpQ(Ga(Co@n|m<#M(%I94~BLa$`0j+CJjv;nm)8; z=!&7MhTa|eXy}tXGS4Q@F3&#CAy1X(ofn!Hkr$m8o7Xk3dtP$hfV{zZX?dA>IeByP zcIEw+Z==gk$S=sR$gj$;%^#g#pRdoKm%k`~Nq$@Ys{FP2+wynh@6A7ue=z@O{;~Yy z`Iqw_<-aZ<3OW>!1xx{7U{zpKU|---5LeKvpie=+g8l`01%(B}3yKO#3icHoDL7Z~ zQ^D1O>jk$8?iT!B@VMYv!OMbog|3C23wstO7p4>rD%7PG78VXKtS%f`SXVf<5EM=* zoLo4s&`{V^*iyK-aB1PP!Xt&ZhslTe4@(|aGfX!OXui-$HKc~o@EV22T4Sei)VOHe zH61l7jauWU3DksW!ZlHv7)=*Vyr!q7wkYH~DrnnI06Q>-b~RA{O- zBXycO%~%c4Owdf$Ow-KN%+Wy20!^c)MYBY+OtV6>TC+~`wPvGcvu2xSr)H04zves5 zVa-v^51JF2)0%UdpEZ{>S2Z^@w>7_Le$)J}d8~P+d8zqB^H%dgi)sn2g;uVmwVYPe zT4`;y4q9ieo7PjS)Ou@uwE@~-tu9O(sqL(d({|JL(Du^y)h20Ew1c#%+6-;BcBr;M zJ6v0&Ezy>1tF*P+(b{^gUOQepNjp_LLpxjBpq;N>sBP9R*0yTfw5zmhwO?sBXg6uM zYIkUNYxilt)gICw(H_$t*Phaz)&8WtsJ)`SuDzwbtG%avpnar$s(qo;zSjP!eP4tW z;YA&aWJOdFTO<@&7TFZp7daKV7I_qPD)K7wDe^B0Dhe%%D2gtME$Uj-y(pomf6;)V znj&2hC~7QPR@7Fsx9Iz#qeYL3UKhP7wk~!lb}No4?pfTcxUjgixV*Trcv*2<@uA}5 z#V3m&7QZU~V}#oX?-4#Dbmb#Pj~Fvz^@xolHjVgo#FG)vN-Rs9N?b}3O8S=!C>dD- zO2(IbQ?jdMPs#0)-%B2qN=x}tu{5wWsawF{r_0Wk-7kAy_OhHWw<~ujkJptamM4{u zEC=P|%a@d|E?--Is{CU4e*k_xoKxk6dtRgqB9zhXc|WyP3^u@x;9D=Jo19H}@} zai-$;ikB6yE1fHqm0p#7D+g5$sT^4eD#urTRk^itd*$)UpDHg@SyXXVLRD~8=c?GM zp;bjyBdR7<&90hTwX^EGszX&*s&sd&ey#dYZBZ?&cCS`f`&JLG&Z!<+T~|G!dQ$bO z>J8Q3RG+WDR(+!et&!JIH63gGY65B!YX;Y(*3{Qbs+m&LR`XTO`kIq97iuomVzp!~ 
zUF%;PQ5#j8QCmkyay}N4kzo8JRIMYvhEHGj$_p zkK8}<=*VLupNxDn^6e<6QJqGqMx~6(7?m|@%BZ=c;Ha-hZ5y>?)b&yKM%^DRj&>OB zG`iR5l+go6SC1Y$T0eU2=uM-yjJ`7Z?&x3Zs5;9!>$=W$J?awbit4KBYU;Ms?XBBi z_hF317}=PhG0|gU#^jFCjwv29Ym8w`-OmO=_$RX-ddlpAE7VR@6jLD9{~ce2aaF>$OPG7F<1rGfcxM%csb60T*SC2 z-8lWYspF=PJ2UR`xU1vs$9s(LIDXjpvhfw;*N@*me&_ht6Oakmg!l=G6OtxOnJ{+( zoN#Qy*$L+-G83&Q+D=THm^ZOt;*yE0C$63Nc;X)u|C|&xDSlFqN#iC>pEPsQrAfCZ z-JR?`Ie2pDNmD@to3WO70Zxl;SD6B~w;USv%#}VCuoCKTbU{jhSXW&30Paw7h8r)0(HXO4@tr-tyJe(Obv**lSGw051n%Oe*?#xFspUjGw z)pb_^UGZI+>p6pb4ShvbH~r!KKH=f?;7L{VuNMFu!gdRiiWifn;Nz>yn|mr3)l_z zh5g_>xCkzRkKk+gX5Qd=IrE0j+codtyu@vJt_{Bnth5Z+%FU(xH zX5lvrH!pm#@O`7CF{&}Xu}9#;QipIKFX0#(9m48kaUMZ(P~9rtzD` zEsfh7cQx*9eAr~u)U&CkX-?CkrX@|wnpWtVRyVC{+ShcX>3>Zpnoc*JYr5HVyXjuj zgQiDK&zoL0{n6Z^*|yoO*{?aEIkY*VIl8$^bGPOm%}LFv&FRfq&AH7L&DG7d&7+&^ zoA);#Z9d=pbMuwv>&-Wte`)@$`S<1*%^zB9T0C1iwJ2LuEuk&pEp08UTGqCF)v}>w zQ_I#C-Hw*sE&E!&Z8_9(q~%!4@s?99XIp-1x!7{0<$BAlmb)$YS{}4KYI)l7qUCkV zpDphfA&c-u9Tv$JQH$6`!XnE>HjC^RIW2Nsnx7C_nvjsh1j~c*1BpP)Ky23QcDc}_JaX%2pj?TgOiYfkTggJ>7{I&?HN6FLt1 z4Ri){7W5|c5wr+e3@w3{Ld&31DCiT^05w4^P&d>In+sbBTMb(aTMyd;+Xj08`xo{S zRthVF!C)0I9n21M!rU+~tPd6`Usj$~o?ZS!`Ihp%<@?KDmzS4Umcz?y%bUwF<(~3r zd4GAbVnD@^ilG(zD}JdsRgqJ1wxYg*R6(hrS1>DDD%vUsR1T{gQ8^k^nOXTu<)z9i zmARF9m48*V8w5qJ?QR%x|&i=gQvnrz(>I|;bY-b;nU$c@LYHv{7-lR z90b1yC&GDf0lXD1f`gQBbwTo-l)o!TGuKm7tSM8qKe`~?D(AtXHs@jHHRBcx+P#dfb*G3Vkh&04H z#CF6^#2&;x#8Jd?#0$hn1O!ozs6^Bw8WC1R00AIEh%j;h2$_mpi`<6Xf!vMUi#&on zhWrl+LBf!g$ZBLGvI*IR1du^w7#XcgtxK!Rs@qk!r*41U!MdO8eyJnWaqIYXEp=^m ziaJ&Oul3jJ|ERxSf200x{r!4!y`a9OUQ{osSJ!JBrZp^XSlaMy!^(!NhU|vF8j2d8 zHk33xZzyX3y=$;H^frVVA`P*|L5=B+M;p&IUTD13c%|`rKYrX(#Mm7_gNzK$|db6Opr8$BA68#l= z40=3zB6Xdar6mZCqQ4QM;sh4!HRXaL=hPGSaP z(l8mA8JL}zqnNvxr(B#`h8i z5K;+43Bw7a2$_U&glUACggJ!yghhl^gpGvlgr5jU2&W0Z63!ETBU~X|C)_05BK%Fb zM<^zg5y}aTgeC%sKqW8;YyyuUA#@NF1T{fNun<6QLXbF+IEeTaaRhNRaSU-haXN7p zaV~KIaWQcxn2LnusG3 zi7X5%mf6AL?^z z85KrtprWWmDuqg?vZ!3DnA%R2Q&m(g)lBWC_RQfWhJ!)c>vnY3}VX|$QNIkfq- 
zMYQd-T-qI4HLa0Gq_JsIS_e%>`%LSkS!i7}AFYQL1ku8@7<~|Z6nzSP8ht)}5q&BB zTly;cM*8>kE%fd5UG($xXY>zr4qZgo(QWh|dM`cBNMWQh(iwvpqZpZtaf}I!$&5LS zrHnObav6UxZZRG*o-;l$zzjG8!Ki1T7-$BCL1(ZS zATC3|XlHz4bTI%%kda^xU=Ct_!5qRI%^brV&z#7d!d%Nd!^~xtF=5OGCV|Oi@|jYm zjHzU5n0ls_X=l2a9;TleVWqQ1uqLslu;#KBuoknHu~x7)u(DY{u(q;xunw|LvM#f( zvIptrr)^pZ>tk)n`84Jcju&^u^i^CGJq%0Xr$f zU}v(&vZu0VuxGR9u@|yevcF@mV`s59v3If$vrn&EbCq+Alg}yO6msr!9&w&<{^305lyRy!C=QiF=Lk5h95JVz zBj3H;cQ8yOVo_o5RiJ=5hbx z-T`s%avyLXb6;>@a!a{yxgWSy+$JuWE8wF=MfZNYY=Z)Y^ z;!WYrAn2q1!T0a8E^umlo8yFe@WBrpoh0-L}q2nc!weS)YkRXAKYUN~Dg zSGY{LLbzJER=7d9Rk%aATewenQ24X(tT0cQFT5=T3GWLZ37-i66TTLf3EvCBLbwnm zBnkOKkWknmR0!2Vo$#};OXw83g+5`AFxfJoWl&3c%kY+wE#MYpOMMHfrMZRNLT$}% z-P5|S^Zd0`d+xkTlM9V~3qHK`p2hmp14$*GWMNyt8UsNC}6cvk}iT)M6 z5|xU_i>Hg1h?j|1h*yi(iZ_UliF3qf#TUev#J9wMi|>gaiXV$PVv$%Y){6~dlh`V@ zOHw66CBq~mC0|QsN#;rxNES<$N~jW^L?%&40+L=ypCl@YOH-t&(sb!yDQKv4xO9{> zQ#wvMLHeC^oit0jNxE6OO}bOMN4j77qx7)!nDm77lr%@0E6tPsDJ_uJNr_UjlqO|J zd)o)Jr?#iJXS8o<-`T#qeP8>5_Q&n7+DqHtw!d$ewts3jw42&39pgG?bZ(~2C$WyK8z=(ggC;y(pa(X41ybSgXw zpQ1+*RD=~VQa=vnra;tW!29o0J$OUP)52m0YDj*{U=uO-i%UrgSK~m0@K} z83(C`sD`SBt466ZRpV6CR5MhwRr6F?s%+IJ)n?T;)lStB)p6Ac)o-fHs^3+4sy|gX zRCiVPRgY9pRA3cU1yfb3;3|X)uM(=-R1#IYN~ToM z!D^TqrzWV0YKoe!W~n9W4z*m}sWz)^YKOX8?bQs?jMa?SOw>%#OxMiPEZ3~itk$g6 zY|vzDPH4_)E@&=ku4?XU9%>$Go@oBjJlA~CKr}E-g9fERYj7GONJG(ZHGEBrMx-%m zEE=oEu5oERny98rfD;@W3*GXi?mC$E3~V%YqcA--)pyMk7@I?Z?s@7Qd_UZ zXz^N-ma64w`Pvq(NSoAsqx)93UYDiYrrW98qua0hQFmIGqx(&FMVG7lLszJ~tpn-q z>*{n`Jt$2-Og}Efc|IwMg1jxp8ik$4gFvG zd-{j^B7L#`mHxfHQV%zz8%7&44bu&?408<&42uoR3@Z$q44VyG4Lc0G4SNmy4c86t z4P1lQ&}U3BW*A2rzcx-X&NR+5W*M`MJB>erj6WHV8jl+<7=JU~G~P1)ZMXJ`JDxwH#@PNtWHiR zzf))$W}0A{WSVN4Zpty`n(|Bqrb5$g(>>EeQ<3SJ>9q-Lf}1cVoQZ6rnV2RJ&m=Up znG~kaCYQ-=3YdCLeWrd>(mc?dVIFDz#yrP7&%D^Y%)G+9#=PFV(Y)P!z*PFn=G3x+boAHhb_k}KU>aP&Rf71q@~`1vNT)B z7OEv=O|_<3Gps|bqpg|NJ=Wva6V_AKGuBJiE7mG&vlU~-TZvYdm1B+A(rp>GFKu7h z#@NQ$_SsI@PTJ1cezje({ceNXFgBcxZ)>rMY*HIYW>eZUHoeVY^VxcAL0i}sv&Fko 
zx>CE+x~6t5>H4uNw+qt6v=6XPvahmlv~RL+wr{f^vLCizuwSxYwO_O6+Y9W4_S<%l z{a^bld#Sz5{@xC@!|WCIYCFcxw;LV99n&2L9A_QZ9CsWb$G?tOjyH~Xj*kwgqXOip zcGNmZ4yuFUU^zGrzN5tLoUKl&^OMuyv^gElZl~87bcUT#XWTWRl)o+6BV7h%SnY?qazlE~hKlJ-B;T_Yd9sx-WO%?S9o=(T(fw=vH*A zyLH{4yN&LV?r+=^-80;?-SgZF-7DSM?jPJ+-TT}}-8bB~-5=al?ix4J-Rvg2X>O*w z)vb1G-Jje>x5J(Aq$J>)&&J?=f>J>|{w-ts>1KKIsmv0l8F zg% zMc-xLHDA83!1vHs=BxHK`Y1k{kL~06gg&va-6!`MeGXsH*XIlSBK~xLhW|_dF#kyZ z*ZxWVDgK%MIsW0Za3zo%_#<#Va3fF>coBFRC=I*~ybn|cssr@_ zQ~({o1?T}*fEy48+5_^yr+_hF4!8r6KrE2x8PM}h&%~a|J=1z-_AKgI(zCp0WlvVm z#h$A@Wj(MSeotFZpeF&O0I5JaFc=sLWCCM>iNF+KI8@CSWtL4cHDG z08Rp@fpfq`;4*Lx$Oj4l5Ksa<2cSR&Pz}@qbwDG~3}68QAOzX~3D5y305zZkJ_DV- zDZMj#SN9(2E$IEwOYi;E>+J3BjrPWaDZ$iWdT?-XXmEINa&TI3W^i_JZg75ZQE(|J zcqCXDga$dm&d{LH*wEC_^w5mZ!qDQ-lF<54R%lOXf9S{1;n1Ld3t`da(seX2fVAJ7*K4-1bE zuMh7C?+Tv`p9!B0UkG0cUkzUigYv@#;lgl9_(k|-xHSAW{2>epmxrsuHDPwRBWw#t zA{mjXkrk2sk#mvTkw=jyk*ATD5oiP+sgK|y#0V+Ej&LKqh$PYxkwx?oL&O+yN4$|> zBpiuF64A71Ms!H@tLWV5#^|Z&mFVx$8_~a_ccOQr527W}7oh0NXle9q6d9F8P0?^H zE%rrhNbIZFh}h`Ztl09{irC87s@R^`@z{yjsaQ_zTNBaBwlkvgvG4XNn3GvDCY4MryW$|z0tKw_o>*E{a+3`K`eer|wpW;X3 zKgTb|FU7CMuf_A@1@Q;*NAZ&Qi}=fUY5Zdx8n1{~#~b66I4#bMbK?AXOI#J##I=dJ$@|3~uE{{R}N B&_nQPI7j7 zNbkK+(|aHx$xZLQ_g+XRJ*4NY8ClJiWr>)3-}?V|*ScA@+4IfJJ~MmT{`OwjRA1N9 zI&$PI9O5uXIG!_b#=;v5Z>k!!Jks1!*Vs@ssBz(uk($<)IfJU3YnIe4k2Hq{wKg{8 zap=UWZnhe)=LF8g<#Pp`jVtCxawXg-ZZtQB8_SL3#&Z+6iQGZlByKu4gY$5Qax=Lw z7vbu-Ca#rR$*tjz;g01_V@_} zy-^>uFY1p5p}}Yr8jB{NN$6nYL@qQP9g50P1qz_q=y0?cEkSkYNVF8yqh{2CRw5k% zIu;#=PDQ7ov(WkI0(2p|5?zHhpp9rNx&v)P+tHoqF0=#PjUGS`qQ}s4=y~)4dJ(;e z-a>Dq_t7WlQ?wI(hrUNYpdZms=vQpQB9^d>6|7=2w&Ff`KRf^r#>4P|cmyuNWAFrA zhNt5h*nP z&kMZ7tGveN^LD-$--q9iAHWahhw%sUBlr@23_qSfh@ZkwNAk<~CccGV&adLv^2hMU@+a^o^QZD>@MrVq@fY%!@R##f@tgRq{I&cI z{LTDr{5JkB{vQ55{z3i`{&D^({#pJ7{$>6({!RWJ{(b%<{!{*Q{wsbL|2_W`znlM^ z|I5G`h{0$O4T`~H$TQdsg@)dSzJ~sWL587*0}Mrmk%rNRafXS8$%aD=(+qCI48u%= z&v2L_WSDK3YnX3XV5l)HGSnIB4UL9oLz`ixVU6Kv0~n4soMc#UINflT;atN7hKmiC 
z8Ll*JG;A?kW4PXMli@bQ1BM3;4;h{?JZpH>@S5R0!~2F044)XjFnnqF%J8${7sGDD zuZG_Ye;HMyX581<&p6OH#JIn4xUtwc$~e|I!8plyh_T%0Gy08{#<|9Y#v0>NW4&>i z@hIbR;|k+S>KFrH$(*m#-oTH_7In~k>_w;As;-ebJa_@MC-&bvPmMc`Um1Ti{$%{w_>1vx<39o?@Pa5vf-KksyWkKCgBNvCBi6Sv@lNiUHHqynTW|~5>1N9V#+hwOogW2roN{Bra`8mrUOhxrje%6rg5f; zrpcy5Ow&wm(+ty0lh1URDP)>$nroVGT41U%Ei%=a>P?NNW>cGKrD=`nXcL%@H=Sf! zZ#vy{mg!v61*VHlmzl0KZ8U8$U1Pf5bd%{;(;cQeO?R9AV|u{!u<0?=lcr}(&zoK{ zy=r>H^tS0e(}$){OrM#)G<{?G&h(?{7t?R1KTZFLSTu+xQ5MajRV)x4;y&WOVn1=9 zI7HlE94;1%qr|b|1aXphu;>(B;&ky)v0SVWgW@c4jyO*YiwnhwxI|nkHi$=wt>OxC zwYW~y#pA>i#Z$!7#52Wn#Ph|A#7o61#0}zR@oMop@ka3$@pf^$xI?^GykC4sd{lfw zd|G@?d{KNwd|iA?d{_KH{8-#6ej$D>ek=YU{w)3~{vrM?A&Hj+Ns?4alkz3I)Jy6k z?I#V821~=F1EmpCi8MwUFC8RJk(`oWs*nzo0#Z;4NtM#!(mbhJs*{eCmP+-~GO0~k zF0GK(NV){lsnTiE>Czd}nbKL(dD3QSi?mg`TDnHMR=Q4ZkdKmE?ecbchkUPmzxqWp^d zy8M>>uKa=gvAk3MLjGF*R{lZ$S^ic2L;hPq3a5#@2^Ddkz^ z1?6SsHRVm^9p!!HBjr=&bLA^#m-4;xld@a+UHMDpRH7PHQB_onny1>-LbbQrSM9G3 zQirMss72~Xb+kH8ov2P$4^gM7Zgqw_Q}wBbsUdZ?I#->qE>LUKMQWW|uQsa9YMZ)J zU85eYf_l7qlDb|!T|G-ZSG_>JSiMZWQr)O-QLj<2S8q~pRkx{ksrQ4V*XPM`i=bIOpYt4(zOU(`DW^e2c@fkEO4rzh$sxm}R)7*fQEO&T^1t zip6PhS!P&fTKtxPWtL@*Wxi#BrPi|8veeRGX|}XkR$10sbjxv;lPv2k7g#Q|Tx7Y} za*5?q%Vn0!Emv5svs`bv!E&SJCd%P{$*8QvpT8CSUtRt)kStnU1 zTc=n()*2Asytn;l$SdX+Wwbol#T31DDu>msl^gUS_@Adad<3>-E+SnswzW_{fHg!M`5tJc@7uUp@+eq#O9y3_iZ z^+)SZ)}O7v2zcIflzcs%te^vhK{G;=a$v-at`23UePsu+$|BU=|^3Tn`IRBFT%kwwoZ_d9a z|Cao1`8)FO%YP*QiTvmCU(A0!|Lyz_^FPV|GJjY8Px-s^|0+NQLV;AE6%-WgQ?Osb zz=EL#MFk}V;|rz~lom`cC@-iem{l;lV17YOL2bd3f`)>l3RV=XDS(2L3QjIKrQqy> za|+HaIIrORf-4KID%eo4vEbT*+Y9a}*jBK;;Ld`(3U(AcT<}Q2qXmx@yio9B!Ak|N z7JOQ;v*5FW&kMdN__APE!S@9}6#Q7Q+a}sno7t9U%eOgfg|=$6r0;tW}9xCVXLqmW((Nn+77qPv(2|fY>RA*ZA)xTwxev#wqtC^ z*-o^rx1DM`({`5aJlpxUi*1+KuCQHcyWMt&ZJTYo?M~ZWwjH*+ZTHxox4mF{(e{$< zW!o#ZS8cD^Ubnqr``Y%6ZI|s^+jq9_Z9mw4wEblJ({8kDcB|cC?``j6?`I!qA7meD zA7&qJFSd`ikGD^@Pqn-3GweRQ-(G2-WnW;gwlB2T*caPZ*;m`w*w@AHCI~B--EoIwn`67%rnJnnef@v`F;$E%Ll z9Irdxa=h#K%<;M7i-Nw6FCD))b~}D`{O0(*kSj!mhC*YZQm7RUC>&TgsBm!Mkiwyb 
z!wUB=JfLu7VM*cm!U=^37rF}Fg)<8eD?Ge#Ug5&RMTJd;M-?^~wiK=^TvvE>;qis* z3(qM$xA45e%L*?qyrOVx;njuL6kc07sG+UC{$)<&Bu?fOPUW;&gKFxlTUuuOf+Lnj zR<+Fa1xK_W&GZQyIWuR`u}(H}RxVHH!SGmEEAY77vm2Y2wlr1OL|lzEZObAJtzm7@ z1n0Cd6UVzsi^e!dmKBYeRx+|^qT4;LXjJKhvN7XJ-6JPVaK~!4a}I8xKKXjCkn6?m z!}aF+aQkw7x&63)Tz_tWZqSXopqq42mvmWIbX7O&mg~7e++c19HV%5*!BD}nzF486nJ_^O)5+QfB45$SvX) zb4%v%GES`J%e|$U()$^^G;v^-CICT8k@NBF!zuO-t(P>zayPk(Q;c zjZMYXZLN*Vs#~jz7gpC-H`GL?HC8v*#(o*Gl4`%Hx$(%CxFflxTs^mJZge6yR48Srl8TDZpO zkBQzc=W6wR^gfB6t)~5{$hAL>E9)8-7qxe*bB~G>6|Q3y_Dxg>v_f}|6;7*bu5Bq^ zNdKGATT(5@vs(HkYB`zKGBd|o>Ka-j&5NpQA|;)>l<3i^tgZoxy3V9^&B(E?Xpdqe zWPGAmiHgo)6%9&Mbitlv^4#7`Y6pv$72yPCUEpS*>;hP#%#j=P?_K|er0P#>-r=_9sqH*q&} zw{W*|x9P?DNPUWauwEV3ic-vA)Kpb8N0!$$wzag|#^@hQ=~^4shNY1o(ZWohx2~mi zL`zeBUF-6?$cnHwJdJ1!I#k`fIFgE-PCOW?kJPkAYGXFh+gQVvs+RN(4@Ya9)>ctvLH8Pc>|BD*4m$;Yb`GT&x8oDl5H?P{ry~H&p zs(FojvqLp+>*Mr;22MS;DGB5~?)@X;Hq_bNT)k=&_a65F*Eri3WWPr1`iT2DtmVfA z6P0e_KBCoTQe9LVJGsyF@CNR4{g79iu;=ThTFw`%Y8SewQgCYrL}rl)0`Ie zE$bQ^4zF$BdQ}Fzx>H}EPt&L7KIc;BcXXjini2z@+iFJM|6J4jnAYsv&)jeNm;qgmnD{W9-Cl7PIzqf2-hd3cC*B}r zLKc1URwN<`$w)yeGV6!xGj*?CuKRTVR-_>-H-sC43XrB(=!fY+J*bEDif~HHDw-Rc zBF(L zzP`GtCDNg$RsBkZb?Vw;*UBwU7?vq;D;) zk($=F=19xhwL#a+fZrd|*iJ>}9z;X+N!OuaXn%A7IuH#+$oNdP$Ak`|LcNeH(p2BLik>OfuJ!nWA*a_H zu5gBC#Kfjhu|X+f!BD_c5pEZDyTTO#zpJt|4m_2X*auZq*DS53dwPmn=CAO2f}wDk z$Lo$Emr}XHWVv>ru+Lc@mnx%DlblUW^^tyF@2v8EHI2)f>gprS{YH-{=~vXxTivj@ zt$J}}?b_+3rQy=B*Xf&H>74Ek2SZMu%NcORs`F5wgS!TrUESP3O>Y!vwlm-hJE!?8 zL*alsR2lGvLvt$Pjq*~g$vI(F`YMBNS9q2);Bih%D8bK^KsTgCuE?UghDhz&wG{#P zG>^~a@lB7~x)0D{8>t0kzo8%sg=6Egee!o)Xf~lrG>h86_Tk%wSXj$<%?foaoS5lz z&|H1;RaYhcFdr?{CvQeapfFm1s`VPZR*&e5HlrF;iy~-|zF1$PAEmEKzs$uJ(e}pl z4L?(Fj<0j*Y;G4i7oDfK>I?KXy=5<)DF!;XwQZeo8_2s&2Bf1=2p|8+a##XSzs?_~g*o?NMj?8P&P5R_5=vs6gx*pwtZq(Q4 zYxQ;d(fTo4(9P%;bSt_I-LC68=Oqb^mFy|W<^Ip>TW1?XA$40W$J1rI~>xrjn432Xoq*}=O>!+ zJ4SSu_6PbC{e}KU|6oqPK)+DGNWWOWM89+^(lDXBG;CzMw9D8o?ehO`?$V-4dJt>+ zq^n{-=j)TM!3Efc?bv|}aWA|N?yX;;U#VZEZ_qdDoAk~4miCPU?n{^GZZ}G}KNZVm 
zql5?165Vf<@KCl#=w_pY51>**Q7}RseEL;Xw=QW}yVmKQ?VJ+~2iDTKw zY{5=E4VPjUcI!9lH|clkcL6&6{~7w$#>DOg`*4Lmc?0(AH*dg)>9=Gs^te)=v=Pt3 zv-Ml`ZLtrc;JJ9dK4Bw19M98l({JC%J&nWqIQ@<+41I0%rXT?o!Am+o9jTAgw-20p z>@Dm%Ad~frPUD@pQ4epxP5O?wp~ubKF5HUS^n3IL`n~$yVa=QQnyS<7Z~Q7O&9x%6 z_s4$w?1AfVjdjD0SL37g$(!&RZZ}@1|3`P}_ie(*U>$>gzy5&!U~G4jmBCMJfx5o; ziFN&C+VxZPhY~G04WFw|-ilAhXW%pOS@>*xj{dOzi2kVlnEtr_#8z$yKA)NVi;$)V z^(UFhe~Oy?%!k~t_J7%#Jo$zrcE0a$j}TSTgLq?-$={++x>kOuj1G6>-Y`)CVmUQjo-oV;`i|T_yhbQ{s@1J zKf#~ko%l2SIsO8FiNC^M<8SaT{4M?te~*8_KjNS8&-fR-8~=)b!@uJ{@SpfE{5SrG za0C%d2;qr=7>PhkL?jZCi9%FjCKjR*E6F4Iq=49noj6D#=|%P-y-6RkFX>D6BmGE! zGJp&ugUDbqgbXFa$o}L2av&K_ipU63Oh%FtGK!2QW5`%Cj*KT0$V74wnM5X&Ddb>s z2$@QpWEv?YF5)I-WICBaJmgR^lXyuv@ex0%Acv6v36c=0B(unDQbp#Fx#Vy%kIW}W zkT6+5s>wo9LuyHcEFz2br}by_XZ7dw=k*u#7xkC)m-SclSM}HQ*Y!8_H}$vlxAk}Q zclG!5_w^6-5A~1qkM&RVPxYPpXZq**7y6g_SNhlbH~KF9Tm3uzd;JIfNBt-LXZ;s_ zxBjdCoBq50hyJJjm;Sf@4lKqY`i0U8Zx44|=q#sL}+Xab;#fDQsQ3D9IfQve+d=nz0t z0XYFp15^sg1;`Dk4A68yGXQx29SUeBATOYDKt4czKox)v0~7!h1QY^P31}9e*?_75 z%>gtQ(BXjQ0h$l!2tZ*#3jkFES_r5HP%WScphbWd16l&84$zT+mIA5=v4p-{@IyP6n(p?w1J00}ULlrwydK~5aH-!FyJjP2&_Yu?7OIH#)Z;7lR=V8b8E&V` z9SDZY0{-$a?OiB#C7Gg)vnksC9nk`nzHlkMG^7`XAx~-8>2rGL%yq|=cpfb_s$;QW zsnZvxccnppWuVl}E;fU-&#_j;6?-8q->+kNHeA|!o^*LiE}_zc5!-hM>lV`+$+c^# zPQpRYbYIx*3wc6bcey+1%dyA5{#_RFxLBb%u`h8ItzmeV;sJMij{=omcQE-Xm~5gY z2X!dv56y4~*jJ0Tu*~b69`E~BTEfx!@;F84S}N2hMabz5xdT3`p6LO9Wksy%H)N9Z z&-Nug+Raq5cNduSL=uC%jS3FyLa@ReDE9>E;15PWaIDleTB?6asZxLWG>^|6p5bv- zc*34?=k&x7xQmMSOA&XuE4=loG;!*9fuN-Ud<5adFMb;GzC7Z6Ns6@9XX!0x-8=L}E z?kx2OtHMdz3ONJQ-Ep(>Lgyl}M+BF<%;RGVN0_}m#G3nZ=f(tQCf+k*Vy{uLeN!4! 
z>aUm+u9)Er(#M5>Cq69Sq@uZ;{K-4CL}3be^id@kniGE;An#MD)Il6{S2&r$OliPJ zQNfv!=7vapzw*Y~w))80wdHw zEt%9$uPYqYPHe6xJ)H5z4#MuL_6<#NPWd#yH)_$^mClP)*q&$;Z9q8SuMBy7aT$fm z3`xjPZG>lgLNnq|ZDFrxTEH3b#FfEYXc=3gOu4gkM%bVHmd58%k%EMXKj3l)k{=3r z8q=fl_(Cd_G;HEqi*jc$&PkAlB3r6|ft?UWxjH?$S`ZHj@@*p56H+HM7!}acHo6y(ConGDA6&TNOE83_@QGoW?F;c8D+t+%NcUU%FL%_h9;MZP3rh*!tbSqD9IOsUqB0WcM;Fm zP^m#F&~e{~a9Z6i>ePXX%4uFtX{NT+(K5r5YoW`RZ+g_O$LewWO8rqs0(SAv*V9rd z6JvJ3SyAB*be=hwmphJ=9M@TEblFAS4ARk?L z=Ax#(>8Vd66`G}gPKl?_PVm6uC-r?xyvtJ+rDy!+|&~%c}ZBGPMC=~%jn|C zEMRQvoKA%cJ0LUR8UA2=#X5^hInqf5Jh7t#e=Zdogl0ua>d~`q`>w4b;AabV>h6ta zUbTHwO0-L`%ZC(`doe{H+_h|U*$cberLkQv1NXcllc>mM4c6(u=J4 z`phn?rbo5T_QEl>-$7w}=vUU;GZ6FmWRu{{@L^zG_KPJC~z)=iGC`#k8SK`x&uD9 zH|%oHa(n$1w4PXVAEv0Ib5Jk!9SnOz{usbx6d;2OU)s?aa96e;{*yL>Pg0~qa%c+e zpwCZ_wB>$Zc$(K=8o$-%pP^tAas*>Dxjf;t$v;m~au-EUeh!3wfC{Tte+rd9{hBFZM1jCg+dY$Ta zh3Ow+L*i`;bx_ycOoeJ6qRwD>c&2;KY=7L-lz*?AzNO-H?Xt>$NP#kptOQWh0GC!( zb?N`ge?q~=X4AVQFm|4gT^;kEWeq`xcpSCzUeQr4vJDAf3Do09|;2!%Ycaq}YuDa{rnacvUwqv>>O z@(Tsbuu>+zdy=hWD|zQ0{zkDf7?iYFp@3(4z*&*FtN4?mWLVA8qLg|4PI_bxSNJ{j zo;y02!lh2HH$Hv;$w_zA`cltmy4enwRr=TlBL&caa|R5!r+e5HQF=f_PF?r-N&{|s zL)`vK<)Qb3v2FScCq1FH4;zDt0#D3l*hD8}`{-le*%^rWT^VEwll6>B2ALkmFjI_? 
z*)*m7xhMJbV6f8CSq-uF(iPsyApL&|N&!WgnoS$%piii`eNW!5C;M36`sD;w=BMwIr4^O*oJ!3o zeYT82F`i0>ZB**K4$klgLSeVBGX5BD7(&}pmhD%I?w+E&jMzhI`%yY+V(d>bGi*01 znkf^vCeqTvPK@mzFmch<`3A}`oI=j+x|umZhJF4J?Fj8aY#7ErO);&<)pb2-NG7E- z+@&+a!P%ZrDZ5DzrM&JMMs=f%1Y*=mFyczsUF8XdL!NT_s6e;q^ddOk$gx>NQq?lR zGJj=&?UkIR^gf`}PnklAH)z8I3YlRXC0ZGW49;-6{Ig>NaS{daWuv&HZZcEoDX;K) zNlGvudUsTBc#g*y;R+{73Trr5cClyPIiA%A%EsPHsTi1s%17Q*19h|_ayZM>oE zCBHMMi-KmDN@;ZH^wQIo$46hsyzZzUMaW<3kI(Pv6f?s?APuI+H^W1FO`RLs@A~5- z=THjg&8FY+&&=LnonFsecYw{DvP!RaPIy|tSvu1la>p)}4CNF#!ALT4~^X&Rae zT@mny{M4q$A?HxYY5$_NKKE>TRS=t(^C(_0C%p1XZ^%>OrKTtT0?*pW++%`nFS{a& zRS>2XWY|2X(SgtJ3(xWdLX}Q>H4rmzF~EfsFvCKd0PJ8%Dk|uB#4+*b3u1vPui`nq7GZ=OU0!jM-oz^oeo8e8rcY8gt)iyRV+4VW~Nnu`6o$5G_ z){(U>j@Hq2i%+Czxh;`Y2hlASJ9pA$y&~Z8h00}lHn_sh%8)-k_F1r!440ItBcZ)*GkyNqbZ6?EUha$|okQD|D-zq~#y)hchv!qI z+~#9D_u1>5=8o@7E}}45nd!K(X@{X6Q04AW{Pn|d=^nRemeX4qw-8tCv5Gp7x(ypB zQiglG#Ai#g4W(68BdZe6u<@O<|xQ}lXQnNJD+_JX4!!aLQ z=JLe6TdD6;Fys#;b}~0otcf{bv865O_Li}|R-6}OxW#a53W>~chvDudvan%Cj4TZ3 z%q@m{4EGxTW4O<7KcKS!oek(hKo@$z<+9!&CaC zjfSTU&j30XP@LAO7@jx0l**1ZyrPc-bl$*D{3^p6hPOM2zDq?f$c12K_(%_LFnkQ? z;yA;~@F};;@R{LrK$q$Z09^*?lCb8+ zHhc%@a@_^!icN+e3_luv0(2#ys{n1tiDoq?9>^&Rj+x;Ps)9cOZA^@)e~iY=v6pclV{c<0KwAK9 z1#~r_YXDsf=sG~x1G)jwjeu@yx5mc(=q$;|f-nxCBHgecjDx9Aw=TNIVN@&^S6AbK zv_$vLOvVvZt~=ivV+obY@NATNU)A-~ijhT8>6!DUk%e2?qqVie!eL}FS-R)N8mAZ! 
zPBCdlr*V2x1TAA(EP@uGTelcz7(K>AjWdm2K(_(99ncOycLRE1ub4FBVa8xA9F;Ky z=#F-iW}K~0+GwmY&H=Oy5DV$YVqzNS8N;bzp^VkkPHpdO(u@%!rHm#8sxlr)MeoYR zq!}q^)Yt^*p14UfHgmg-t;RM$|Dh)BK0x>8&KpgQ?35b0jas#up{G6bqK53zgdw{$$&g(^4cV2(tBf0r8;zTcn~htHTa8y6uL1NJpvM6{0q99U zPXT%w&@+IZ1@s)C=VOM9`J7}nWXyr2J441iL%KI)%+n($L&h9BvM^-KKcibi#@sD> z!jLidiXJj#%&DSB3>ovT$ik34X?!ZxkUeXBS)aVc_?+>1;|s z|M-uok@>!P@`I)CJRj?iT~Gxpi}WH`1dZD7&j5YCNyroOg#tid0QwTpS3MQ(Wlp?J z(dBff(3`5E51_9T4cbo_qEFr`^b`6E1B8LXAYm||ZvgEA^ev$80DTYWhpk8xhEbc& z4H1UZ&FPOU4nPplPpQjI7tH$qVl;s6!V+W?!LNPuA&gC|Q^Euq2;f0sqHvHfNti55 z5e^m(5vB@GVVY1XxCFOQCQKJ*2p-{3VW!{}$_1a`7b=9qgn$qfLPDi5OPDQG33G(G z!r{U^VZLyL5Ed2))xtudMyM4c!Xja@utca6jue&(^};ftL1+}3grkIJp+#sF+Jxo8 z3Sp(NN?0wd5!MRpgrkLH1YH2(Sm8L~c;N)$MByaiWZ@KHy>O~a>Cm9RnBC~Oio3tNP(!qvhx!nMM6!u7%p z!i~aB!p*`h!mYw>!tKHx!Zu;MaHnvWutT_8xJS5G_>XX(aKG??@SyOJ@UZZR@Tl;Z z@VM}V@TBmR@U-xZ@T~Bh@VxMX@S^aN@UrlV@T%~d@VfAZ@TTyV@V4-d@UHNl@V@YY z@S*UL@UifT@Tss<_)Pd*_(J$n_)7R%_(s?zd@Fn>d@uYU{3!e+{4D$;>=u3%egpI~ zpkDy(2J|bS-vIp%=np`D0{RQk-+=xB%mGG#F<=6i2W$Xr1S|kH0TuyEfMvi6U=^?# zum!LN*a|oga6aGyz&5~kzz)ELfO`Sn2XJq|eE{zZxG&)S0QUpjAMgOc0|5^LJQ(m0 zz(WBK1H3=r0{|ZgcsSr9z#{+`10D&u1n?-pqXCZrJQnacz~cc=06Y=!L4YR#o(y;j z;DZ4l0(dH5C*Wy-O98t8y8)L0o(^~hU=QF!0nY^N1zZl;2iOm|0`Osg1Av2oLx3v* z&jLIfa24P=fad}}9Pm8A^8p_LI1G3J;A+4N0oMSo1snmq2=HRSO90maJ`(U!!1aKa z0d4@?2)GIGQGlBPw*YPh+y;0#;1z&Z0$v4pHQ+UX*8*M#_-Me#0M-Em;9~(F2l#lv zCjdSX@JWDA27C(O^?*+Wd>Y`>0iOZ*Ou%OWJ{#~kfX@Yd9^mr%I+-aJa6HzxiO%rnr${}!oi8=ga5jeoaTza|< z9AIKzJ88sH0|%IxdrnS)15C^pC%eD_Cgy3AMc@Dv^QXx%1b~UT&Gb|V02A|^$vFgo ziFwKFc?bX#bCSt61b~UTzvL7Gz{K2JvJL@YVqPkH5(2=)yisxq0bpXzCf$SpFfsR$ zY(fB-n4d^jlF1Zy6Z85=Wltx<3M7R9FfnJ2Ttfhun72mOAplIw5hL3W04C;Vky{7= z6Z4?xJ_LY?c}Jv|OVJJI|37X@b8Y-Z(?uwsUtJN{Y?riU}6vQy}RJnCvm+^?AbjVUcZUG zrgsII#OpV)_wwv`{U-Jnp2SX0$Llw-SM4N%cK5t~6MLXe;=8vONz3auv6tqoc>N~! 
zjGQB{-^AXDGw}LN?CEz;c>N~!%$wBDOuT**d&x}_&P3ievB%mZihCMfzlpuX=Emzc zu_xDzynYjVPR+#YH?e2Zq+yee*KcCapGm_ePU7m4*KcBvn@OFHk>!(k{U-KOnM7C5 z!0R`$m&hc-d}dz1=@$A%kp!KX+nsp*rrW7(k_uu?P44E*%Ge$x&rmekD5 zynfTY9Z=i7E;{h~P4`o=q~2uU^_w1|vV)U3$=&h#O^?zdDNOGSynfRYR62#eoFtvX z%``ntMfZT$Z+eav%$nD4da+ZJ5_IQOuTj%0RJ8jWWYg9C-bvcQaI$ zpxXQCQ+>>vFXlyH`XEDP>3IF7k7 zB2VRW;Ps0FEs!OzUzDhDM>hVR@cKnH9nzlg`bCW*b^H?D@cPC4Xs@&4^^5j&!d>wC z#a>jnJ6^xohe~zB>lc|3RJk z%Qln2FD)%rd`s9BuU}*-X)-t)riG2sGgHExi!5tR1}0=$sCWyz=Jks#(M_&h@0Qmu zva~n3wl@o2zsPdnP`)X~8=3`o+U3O0M>xOJ2Wt1cl4MPe|LSjJ$rannGsqYe);3 zhSx9FQlJb|wu7fv2426&vVi`3Ucbn~6KCjKhbDDN#TQxV;tV4z0TeaBnRxvo3v!%+ zwUHJqh1V~#5Xf0U(8-dH*Dtc5$XT;NDq5(wWBCV%_liMHUA< z!#GN`G7g!->layY@Lc(-agu6AUcbn~is$xGGV}UH7J)n~u2s@UiBlre^ZG>=qddb@ zN~6Q>c>N*^VxHk3kOngYuU}-r&i4kdUu3b-Gi-U%G&IUy>zs8}WKq*I816J!F%Eub zUcbnqt7k>vjSuZq@_m=Qev!pv&ut>6LU-czi!5?`ZuI3;$W&gx$l|;2MP9$i;=*Ux zuci4|8F~F83m%_gbxZ&z%}r`aI`R5N7Gi$SdHo`bH=o=0?S|JcvZ(aA+Q}q_XWV3V z=Jks#jD3bZQ$lm`k=}vVFS4Nax%D_Y{JP}zODqh2hHY}1R;A(fODraSZWALDuU}$e z^RqGwNy98FUcbbG>}S}lr1p4^dHoU#)W3Im{Spi8pS3OShSx8#900j3k=^n7C6+ZH z>lSvy>z7!1fn3qL;`K``^FW46%G8mNf!8mw`~}3)M_orH402s{ub1r| z))xSNMjtlNxxKD!`~2H)j&;W_9VeZnPu?UQFP$Kr2>4mR&jEgZlXS9linJc^3xHn) z{L(CUE!E^+BZx`o(8iw&_~k^yE|4zQCvTN5lrEAkmM)Pll`aGP3gA}(zXteqz;6J4 zbE|ZPbY+wvhBUx$0f{CD57JDr*-)O41o26T%Tcz8i+oe0C zZPIq>PU$Xbhjh1ek94o}AL%~ne(3?}LFpmsVd)X+QRy-1ap?)^N$Dx+Y3Uj1S?M|H zdFch|Md>B!W$6{^Rp~Y9b?FW1P3bM^ZRs89UFkjPedz<~L+K;wW9bv=Q)#F4ne@5z zh4iKLmGrgrjkHVpR{Bo*Uiv}$QTj>xS^7oVE&VF}CjBn`A^j=+CH*b^BXcs6u}oxM zHpoU%33IZw`)3uK#YmmPAU+)Lg^?k)F`_m%s~`^o*}{_+5M zpgc$(EDw=~%ERRS7LXJVGv(N6IDgD0#FzMjk7VlgG;wJLPF|sqB*7a+y3`o*{eWL*<#WS1y-*vR|%{50e9OP!7qJ@+^6_TqVzu=gNo6 z^W^#R5pq~wAXm!^wDT#qtulPCimzD%Z=)0KX0R9l-Abeh={bfIk5IA>fYy ze+>8&z@Gx%3HUR>p9B5^@RxwU0{k`LZvgKC{4L<`0Dlko2f#l9{t58UfPVqJ8}P4y ze*^qG;6DKW3HUF-e*^vp2nPfK!9WNQ9*6;m5r_c91VjWP0g-_yKvW=RAQm7R5G#;8 zAo)NFfY^Z8fjEE^0_g>0A0WMf^Z~LjkiJ0n1JVyje;@;Z3kl{dzfQ$fA3}hsb5+I|1j0Q3W$XFobfQ$z+0mwuk2LYJ`WHOK`Kn@0S2#~2j 
zoIs`lDFxyJ;s#O%WIB);Ks-PW1u_$e7f3k}9}qu~3Lu982>=NK2?41DG7HFTAXPx- z0GSKqa3J%5%m;D=kT8%1K&pW(1X2T}7DxohA|Q)_ECEsn)&n^e$Z0@M2XY3GGl851( zg~{x#FgsG1%%1@6-Q zQkcvh-m)fz$?RoqPe@@hdr-@T6ehDrvu;RXa>i`0$yrTh_S%&VDNJV1TB+2Vw4^YZ zy;bE(3X|D$RMw<0nY}7yOA3?Oi&1W*FgZiM*Axx1cb@J^VKRHp$&M5zv*(#iq%fI1 zx1{p0Vwo(H(yqyAlDu}47glEPBRP@6WcCJ<+L$z?FgfRBuX0w&Ugd0)y~^1pdzG_J z_9|za>{ZSo*{hsQvR66xWUq2A$zJ8`lfBA&lI&IPF4?QH$H`umT#~&ixg>j4vPkx- z>_M_urTb*BO1H^gm28r|D!C_nRkBF-s^pODRmm;ctCB;qS0$TduS#~wUX|REy(-xy zdsT8s_NwHb>{ZDr*{hO$vR7qKlf5drC3{t}Pxh*0ne0{h7s+0gT$8;j|03C|l54V8 zC8uPs${r+pRrVy=tCCf+S0(3UugV@IdsX%**{hO$vR7qKlD#UqCwo`Ag$C6{EcN-oJ>m8_DzD%m7^RdP=Ds^pODRmnZstCCZ)S0$%p zuSyQdUX|REy(&GM>{Zz-$zGKnO!lhmon)^{&n0_R_Exf2W$z?=RrXf0S7omxdsY5z zvR9?YlD#TDne0{Rkz}t*&nA0SdOq2!(o@M^m7YuXs`OB@SEZ+ty(&GK>{a>q$zGLz zpX^oXfn={r&n0_RdLr4Y($mRal^#s?s`N;*S7mP{dsTWm*{j;q$zIjHmF!jRnPjhO z4E)wP%yPs(UTjtJ;IfUez8= z_Nwl^WUp$^Cwoc33(s`h-cS9LEYdsY8^vRAd|lf9}v zlk8RP;bgDs-b?nX{+E)ys{e&#uWFAbdsX*BvRAc7lf9}vn(S5Wv1G4mk0pCmdnVbd z+H=WX)t*WAs`gm2SG6aTy{da7*{iyDlD(=unCw;km&sn+UB1<5=;g|mNOO@ZvZ$^h zQoEKB-DtRfZ#Yy*BQjE1nR`&ZU7xf;y#vVQvBK;TC)EKk*&8uVqfzfx?@1w(srRWr z>XUEQ#;EtJ52z2S52+8UkEoBTkExHVPpD6-PpMC<F;&#BL=FQ_l7FR3r9uc)u8 zuc@!AZ>VpoZ>evq@2Kyp@2T&rAE+OyAE_U!pQxXzJJrwB&($x~FV(NquhnnVUFx^$ zck1`*4?wO0vJuE;AX|Z41LQg&HvqW_$Spu_19AtD?Lh7VayO8Bf!qh=0U!?nc?8H~ zK%M~d6p&|tJO|_jATI%V1;}ea-T?9zkavK*2jl}F9|8FU$W9=i1Njoj*Fbgw`3}er zKz;)93y@!d{0`(#Ab$hT0gr*_fj3e${G|S@{-W+ye^q}|e^>ud|5X1{|5pDob7o}5 zW@6^e2D8yDm`!HUESY7qVph#&v&F2Lt>!#)zPZ3`GuzD$bD_DHc^`9cb071*=Dz0r z%>B&$%>&E>&4bK?%|pyXfj0qf1-=*X`vN~0_+h}01b#H|iYoM zpmXg5&CCLZPRN`xw>Gjok%z3fsk*tfuBJXRqNb@Sfm{+r-jqOQhyBvb$gW7s($>bN z;#d;gL^UPFi|QivwW*a%h*q*CQ3><6s@U^N7S}H;u4!zJ6t}KwiiDR(YDSG%lH9FB zqBUNfsFB6o9GkhuRE4GKNLdv2+5{{+;EvA)w!X18($ugxRYT=b;Oi5>%zbKNF2KvG z8|s?c>Z@BDn@223eBn?O`o;t_izAtnDq15eTico=$*_kvn6s7dOni&k-aq6!vSF{e*TN8{r9WvN53DGIzT0n832 zqcgWX-4IM_?TRSqojMoR+$)P#)-={eifZdxs#{tj%NEwJI;gm~rM0?dX?Rg{^|Htz 
zP0fupk(L&ZYf8!33FC&eE@_Tb*Lqx2#tmWr?r}{SnHVT*qu4ug#~wF!OgGpN#l9z@ z3`QQxNf|2|o9kf)P9W1Ry zPR6{tt+uW)*_i(j1${RG%A6W<8zR&$CaspgMKRw`U^1?aE5{*{l9s)bIxJkYmJj2# zSXdg0oCXOS+{qhrix5TqIDyLE?Q^nKv8`r8B^GlO_R|C`qtoQ}?G{HGT9ZL-QP9s4 zpezr>xXgM^o!;@|Nm66`L~*}J;IfzO*_m-OP4W1-VNqK{O*pc$sj(r_&{|hrKccRo zKE4&Q42)L%Ria|Xr&-^rVjA_huBN)RuCbw|xQgz^MpQR76_++Pv_=|QTZ&zg<&pZv zrbu&fMSXSaqQ>TBEyd;4HU41Lh^F>GB*_F_Gh_c;GIGTD5iPY#i`!b7i|ZO{>f34~ z#nsJC^r#g{QGWZ`@S8+i+1W6sffAjKDdVsGVEAnUHu?(ONd@$#q@}oNNnL$iQ%2p# zR?d`a+K-0cC#qpb!<@F?X@T2MfIlXHqgfaBy#2vQb6s`4r=c~{yr{Y+`7mT@KNtR- zsEnNp{d-PzNEk&%?vVcNWiif6}g@}I&Suo)|wNdH3*3s*gH>7`wzC&W0(>r zb9T#Vmb3NAn=PkX&aj+mISY6ZcnNqJcxAKY9Lu>}k>z~gRp8CQTY%TXT466Vf?n`o#hxD)3`~AIqS|(ZTrYY2Fh* zh_-J0xOG))Mvobv(mX9+E9?@pm$naAr1b%Q0`Lp9pc3st) zl5wdcK^veA?dt1k!?gXmBJDunCj&o)VIDltxxMJU*R5Y^^VW~m3w8+=(ki1+(}ACnfKqFk&ZtWRHBYPV z5^AAV!xd=};130UCWG=)+kA4~M|Jin)Ogy{(PKJ%mZe&gK6$fNuPxIWv_{~4!25x( z0RFJe+EH3FSERK99{@hcFhl>lNwv*&mig8{`bG`Is0nl|j2zx?;^;9+mRUPmJC157 zo&D5~*G}Myw3C3J1^jH*o2r4-j$K_fchkHzqehJ$H@siTxN#HK%~>;Q?1a<_tDUZ$ z)1~Uq)z0IJvcpg>#Nq7Bl(lLjx88v#;@O~wuM^2=Jcl^lIr9`_-+d$QS zxpswirFIqYM*trNegW{+o3)MFCay@^0{lYYYZz|r|4{ua2A$45_v-vLC8NfTqH9>m z*iq}|(%;68OPxD6Yj@Bt-J;#9-KO0R{375N1HT0Ly3N`)Z97+_-39!Sz%ON8s{bE$ zDJ&f`_pUGIttpu>X5#REC1Xa>Z!~Ju_zqL#A&o7{>9%6p6B=EVuLZsV_(s+tW(>^7 z`p>LNGlO5yUhQ%iy{5g+6=`n*e-!Y|4620=BiHH|&};3>VF_(u$;gtDRE@u{eL^+< zf%c*Hk@hk0ZNM)Feg*I=H*242JEIL;1^nu01OJB_pYn0(TYv4Cw`TO%iF6E&95X)2 zQhu-fLY0$tCEKn2$`xt91HTsdb*vjl4|Hz(THW#05qGxhnVGkeR6}m%tOkAZW-GE{ zE3xvx>%ar>#{z%cW~P2$vEcSk z8uxuVJ2i}%qxyPwRSGkmgX5>o(B>SLP=_`bn>J5Go+TnThd|F(3H7EfpE10mC)1(& z+73s?rd2)8+&pWXeJJh>ZSi4oOSGloF+4{^o*N#+#DY7PfAUGxyGQSriy@P-c7b+@ zDh_4Zh1x~h#Uk>25qW`#yii15v`t&CU5ZV+Ttr?hA}>O*{P~oW2x(~~n25n=QHy8Z@`j9H zo3`VyXaQ{}Hf@)P>=luHA+)WbS?=y1Py8+{7EX2Q%u%O<%&Z}xo3zKOJU?1{j5eqh zBJxTRd6kIVE+Vhqraex3JT~o#BJvs$d2I;vy8lC-*PF_pzU+aDo*cXC59Z~mVxHs3 zJ;z z{Tp+_+MVsp3%TzH_4+>TuESzJq~u%yMjeh~ 
z-Dq7DHl0R9J|iNZ4dFhgj$P4tUpt;sb2@pdIy)Vz$T~7|4j4S0q8oQOdUfM~E*4$g!lbNeZL1u1-Llub}H64}X7!(Ozwl43me3+t}icL37M7}B_Ukf3>uJU0+ z;zaqnkcv4nvqR=hW%LjU))nbytDuJpIdpS$bFt~>i^w-cbANE88m{b&gM1;KOV@DN z?l*v7jpEQ1R`wKsWguree zlI7A}tGn^A%)Ci=GdA6=BJu|j`C|y`Cv|!`?b6RDhf@#EtdMJ_8kV7Ruh9C+6p_D($lpZdA3|cA?g8C{x?Q@5gfvV@BZM?YNXCKHOkH?qN95qbi!O8n z$tizXs5oFLZ+>&F-{tQfvOF1D-%Kxd`x*51A=^G}4fQSc9X@YEz_+%urQX%p z(AmAA&fDNu_q3!Jb~bzK(---D^__Krj`SsNuiMq(9?ZrMlKM}u-rx;mQOuJ+FE_O;zD-Y$2G>u_X-Y7Ub4Vwl9iWgp!u>eoz9 z`S&CqxUS@Fu5I`E1L0*?*C6)mA#7=IQAQWK8svX>q&9fm?E#m^@2lO=;H~r5x2)X| z@U=G8w=^~%js*kjCMv0+r2yS~x(|Z+y}I{>G`v^$p^zend}l-d^2L1L{-yEoe)*4e zp9ZUYb)N}I3V#{KUfmbEy}Et6FG;S}D07NS7I!w+dFK0ko<*)kcUfm^t=li8BZPFM zkmO)__5U&>{u_}E&4asxOO1PI!K3?D_g%GS{Lmg8%!94E?{z=m`wyk6HTvS^OAoF) z$n~Ffzp7mSMMxuib-xMesK3E=J;CZ;eT0xk4RBpQT(?gzku0RB5OYVX%>BPtvcyn= z@Rnh}4s=+5lzwzDzgIs>NSa=Kl#sN4r^7n6!+Ig<20Luj?bDmo4jV!p)~g-CC>{I1FZchA z0Z=pVG2ZwmItM$RuAd&v-=WXYXX@?xEPb{Ye&L{S^IFeZGF0kj4usR!DI| zN)S?#kdlRzDx@?aS%oxNNEt%13n^Pjjve|L`kDFyeWAWcU#u_Dm+EKfXY1$a=j!L_ z=L^Xxq^UwG6jG^><_f7yNK1sYQb-j-suWUKQC6X|Bv0*S=m>^Byd^@Ka0sA0 zM^15RR*v14nd!{3WtU`?+VUL5MK*g@W@bTYc4ldDM$RD6sxU?IhX5)m%_u0&$gtb8 zvh8+TcBZ4mmRIO3vgJB+^X!hCyga)zcMxbz7%1@&K&8b6_N?sU99xmSpvab8LLj?-3HTvS?6 zn3q?aTTnO%)D#9vIRsF4Zcc7)W=@99UTDv-WtWr`+X}O?a%}djoQ%@atSoy*X3-!} zYZz$KA%N^zc4w*GUTiDP&MmfO+q1K51;ts}wyeBjhoiK}?ku)v4g&eZK$b%QIWis2 ztjvs3TWN7gsVzIFILnq-P*7wm$;ipfEY2$`wL1nk$LquU8YJ5xfJ#cTa&t4B8Md6l zJcljYUQ}vx<~TBJIi*DXO1l|Gc(&(YIoXm z97S2Vj*QYlicSpkDC-bFc_kS|B^i0Sw%h_0C^xszR+wFyVaq7Y%`GT)mgE!{3}N9OS5td?6#upBD*cyX?NHPGBOHn4o60zvpBQZ;k4TaDcT$c$~^>7p(CRx z$C>N2If{#mZP_`Q#WrVAaiPtTSx}H`FUra;$PO_(u!NK^if zP2Li$zCxuX3~{->SB1DjNcp|`J|Ru}I}lf?W_`PmroYVR`fK#pGJ?I-kJ+NXLBB)4 zv|6KauWxs?)i3qC+B)j|&Fz6z&29DW^^07ArXWWOX@-yrgf!trM(A(W-=e=YV!Zyg zYK^w6d`^F{`R=yHKvT6wGvwKFm$%a`q?sBy|(BD}x2WHIxVVmc8K@?!ClcmsDEgXc@GPzXul({dH>#NgPxsDF8Ic>gJ+{pT`2*1x90zb>TN2gARu-=SX#CFuX~>L*E*repv{9(u<5RjMLw&zts?9h^^L)86r$-_$Q>9LRzjFwucdh zI77T4VMST_DxV!7{)O 
zgH=eY_Oru~uF{(!q}2!0tFpr%qBqPA%OF^uVd_BVLyAB6YKEa;u$@97RrR-1qP9~i zq%{Y(GgrSuzcJKKb%-zNL)pGiWqZ^h{1U^`V1BQmTu83}Y;p#xtuj;$P<0T&)drV} zSRuA*~gXzgnZ6Q(RK)Ue{db?vFF8HO7Hw z1^&9G=776SJ-1k`NvK~Js1KF3d3|**udUA4+TIy(`=^9{ZSr*lmalg@svX&boxi|v zNict_;X=bjhKq$15K^a*)@?OxFPTMg@r ze68(Xx4I|1yt7r^&KK@rxw}!_)7Me0G5@8?!uHS>^01*j5N&C5!0Qh4eV1WdaOyV0 zm4>Sf+YMI>X}yq+5z=u&I%$yV>kK!lR9|nn!LUO}-9p+Rq@JyYn+!J_ZV}Q(A#D=U z(bbydzmRYMk&FDzK7VtdyIN!U_XP`Gb)Ir}yUVX0JFV7C`uC;7ZG`5j#F>QuERI_i z$`1{8;aUmU5`=?x~;ta{wt z9jnXAC+}}HcvPQJ2}TB`KJ(b192~K{ti06M7FbbMUgY!o{4b4+iJcG^pOCn`tX$Qw zq|Wsn%%cch)u6yJq=aZS`(jq0d{tCpjf` z(&{jUZLU`L%J4^b$NskZQqxwJl`jl&;l+_w+vN0&%Kgokx4Y`xE6U1Os2Ug?sTp3F z!Jgz4dsaVvefG3H$tgLG%Chp}<~mjMUH1C z4R3`M_hccRBBUN6ohqc$t}?t6p)tH?ct1iDp%K#Q!9_wkLr7-@7pdQ=n(*({ZmD`m z{y!Beb_Lwknv{Q!q!ua-xR zE*H`^A#E4Zbwb)9q+5h^hYA~t{EbJ2qmgsfXym^*-u~C?1dN(6^7;QIa{u|4!L)}l zd?HJ_P(}VZtlGql}Y`)}XV`m}ayH=?WqB_8D!)$wKNA($-~N&Gmt%2x%wr!6N%_Es zez6>7%rQC-!pJjD5z>`Hx@w^Gz=y$VGmM35wV7(uSBIJ|G8PNz8X;X9ZhD~3z=y%g zbBzlQs$6DVD5UF!bi+XDfe(Y#mKs;6)t0Gk-WY0grE!&zZW7YX|GCX-W35`bMy-5n zsB)dLUP!kI>GuCzxy9J3RvyCWHlt5ScM9pQ#F@u$-p|{U+Cv}qn*d`VIQ3d%=jbm+ zKQ#I|mKiq~dyE^6n~X;b>7LNlGFOS=_Xksibf1v!7t(`5dPqo*sJ#s<_WlgY;H3n{ z(4_?bqq8c1JpyA4UrQkERE6>Xua%o{x6cSG%L6KX|DzLK2j87x{6`pRmx}b?-$i4* zAPo1g3isch9W`zVgFUK({f|cUfYV2z$i>)eyf&DBwXx5*)ws=grSU4`cH`B?YlQT; zke(3IlR|n*NKXst86iC@r01?SUT3`Ccmp=Z82d=VCZy+ue2tLR%~8SOLV7%?5s_o6 zHPh!5m#pylJsqmMwA59ro*MQ0yk57zy43D0bmo-g+3aeDGTWK!usQQ`GHpdgc^Ns` z8Ks38xdm&~>dkHSzOGXBvX5#_{G8&Fh5mZC-(7!bSoM3=2P(~JEA8}pJL>#yciRGA zy}Mdt9ZF9ad61-KZOv;t-E->I4;UXb?lL}PeAxJi@lhe|7Sans+9RYFh4hk;UKY|n zh4jkR#>YuAK52Z)__XmElGGKHSA!ZMy(Z)=A?FGCvY;kdUNNV*WT~rmzN^;lt=4Gf z6ql4Y`MSzn>zW(Y2u2M_OX`~gK7WDV?W)$;hO*> zKTuirhVf0~TgJDI?-<`TzGr-2NUsa&4I#ZLq_>3hwvgTt(z`->?`q?R#*b8189z0C zX8c^0)c1w-gUTl%n}wVvWQ&L#v)UVKZ>ity4hN`VGD1RA9&okQyZrSk<5aT4uT~E! 
zq0xt8@&;F@H&E=Z?Q9IQC9XL9XptIEb*QJMm$=*g?hbcbAbgyIGFWY- z|GA3vlJR@v4~#JWX#9zK<1fZvt2O5K@F}`55uvl#>jPyW5jQou+nPH&4)I2(#tdP` zsvT8}3+aO(Li%1v-~7|~yYUa>p9+jnB9vhfNyc9z#w(H3S8L+x_aD{^2Ow&i>%g8= z`25ZNy;|Px^1Ist6?NeQ;;Li5vaYGwTkm(b3F$*2eIumrg1oF8p&Y4x5C34Nd`Pnst`l#lPLnTy?Yw)qEglf)sVMBl2!FNLXQb->Q=_4WiOI>B$e}Fxd zl~MJf*6W@VaJLqRASVwfd3nI!8A_|{PX;OaVD(nTpcoZJNS_Glb0O{B!wAK!j8&pn z4{h!Y1wIP>ZiBV$1YvP0K3rBqWQ{Bz}K4n7xy7^(t^tYEF)JDEoUheDk*Qo@C zs7MYy-M4DUHipl`DS0JLv8ZGGwUEA2yVF3U@;^U5KcI-=E@dd0hfa+5DR#9dhg#wf z*UVOOsx_$x55Qor4-~9E#i7Fe?eYf*G4&Bi>VHb4GBt?GgYSg&ZU2x?4_5aYe-hFM zVY&*G!fzdmdLmvlnAtAGGJh1Hk zqr24yJ)hx^ z`cS{rrK_P9MNLUnYsU6lq(i(L*Z;0$eZbwOCfo+?NJamqBToZ#$wxw&TjF&*0`Qr^@N?nk-KSdf39+;vNM=} zrE-^Yw{nkiuX3M|#|k-G$m4`OUdS<5Mrf1=lm{a;%0t0LLXH)3occ4NS~DiBilx4~ z&JNYJShd{cZ&v4^jul0|who`yy*gYX?5wIQl7sHVfQQh}YDIZUc}7*(r-eMBH$tOk z$)X1kRcEbVp~~xg?L$n(3(AXXwLL zqL7oT4j?aa=FvBWg5ANo`;@N_s{1eH8zH9(c~ZrG8dZ-dai%|`3DnG-+EG3gJgXD}IsoGD~`wPsBJ7&f{?zFgu=_d%4KqD|um2sOnBIlG_GIF-b|lW+5O*L;O_NQTe<__6D(y_1={vBrV#+nm z4CY^La+>l?Q%qA$`KD>6>82S%o+9L_Le3ZRG$BtH@(dx*6mr4UrUFx;smN4pDlwIs zW|?LSxlqXSg}h40tA)H)$m@hGgnU7@CguPQ9S9&Mmv^_;`n=VexWl|JbOq{~N}Jp2 zt2Nqwc^;^Jc-a2yA;upDHl$b8nuNo=F7&%yo_1ezXf5k-qz!n9)tYgK!I|%C99#i4 zm76Mp`P)rPP0LKnO)E?*O{+|+gLr%f)BggiI2 zi+W5@6Lbi9UQiPb!%9M1EZp^~3)>NnR>SNZV2~P0%x-r3)digHYE6iJ7Ite74X}T5594-(-J3%+GT=|C zr1WcoH#qf*{?ESP)WN+gCckNe%F+%~z|?74XX-MoH+2hnfso6DyimxCguGbDON3m$ zon&l0Z93X?OoYZHg6ctWm6=P0yi7IEE5Z~Fh}$58>ePhAB40h}Bo;oud`Md3nf>HJxcX zYxVwZ66NkdpJ_9`A;0%m{)doPhRdI8Ixn=bXa5F?5Y6T8z*f`wrVCU*S39tGqW{$p zxWgNU0qyIz#ER(>)0M&eYfM{Amzpj!U2eL<)NAT9Z8dEZa)pp9g!f>6G~WK7V+sY;J2T zX;WwLdNozGKl8K1T^9(i-48HW^(fVFU!6z#l{R#Njan@vJB8h0Z&O{~?;qi?466RY zL2zo({h6*M?mBl{;9zR1HM%*)gUyG>_W;Mno|R*bb7W+0+7wbO(>X-oHSwlLgHwA=j|qAI_K97lC)Ec}3Av?zct)9? 
zHSG>M`%KT7o)@x5$lgBF3#L6nZWVIdlIHp)f$l*cb8%yTE3XZ-V!Frlrus`ipkjK* z^g%FxtLa_Sd#3k=>=$x}kONyyAF8S@_P*Y{0 z8-o}67k9cl-C=D}wMiWm)dYnuLvwcon%f#fX_mIYVEcPb{|e@BGwm~dY5L0awUE1n zyk5xNLf){=^bI!Ccc$-!+!Kpst;WSaz&VdL+ZJ={M6KL1(Y&cOh@=HT@~%O<_kQ)a#CdP^4(3jn z`vclH$bI!pLwV5xHH_=*_$!W@ate6m!1n@h1rB zi(d0IA)ht)YNokReHAUFPkYTpLf*Wn|J^9_Ec2W}LgotjAN{Ws^8)k2!Dbf;`JBO5 z<>nQ^{H^AtN`i`au8_VE^5(7PBK5D3H;4bN>^~~iUvHFojk#v9HJ6aj?|-G3>&@;1 z<*Ui;RoiGbx0pRbzEH>)3Hjo!*vxI_u&KU8$d`s^^!dKVfuR{{EJI=YJa>0^qtMV& z{W=xi_|Y*~P?fu(1&7%C{zCmUk1}_fy9U)|y^y!`zf#OS=8b^Vve~6>{GpviFHITZ0Ei z4CeFA7aas}vH21qZxiyB2QD5Y&OG_xx>uOD9#nUm`AQ*g7xLBN#s`xA;W(~691T@# zOg%le9BW)wjv94m+MOBJxZLcl{j(Dvo3B&Tb$vp5{1Dq=%r}~ES{53Cpt=`D-Hb67 zNB}HA4lsq#{*=)d!RX6j^z|_MRv3LRjD8G8KLMkkh0)K$=oeu0KVkH%F!~J`{S8Eo zg{WkRN`V>HLA?j6#`Us+agE8qarU1scV9Z7sb3Tl@0mkfrG55ll zoiOGx81n>-c?!ln17n_pF|WXw*I>+>Fy%?+Tr z5i~c0=2p<$4w^ebb2n)21jI=4QQi5J07%N&~}102-;IXdkJX!Kzki%Zv*X4&^`>>CqVlgXx{+sTcCXhwC{oT zbI|Su-3-vp2VEKH7J+UF=qf;01-dIicMIrl1Kk~^!!j_`f}tJ^4Pa;j z!&)$OfZ+}>JPd|M!LS<)d%*A#82$-{SHbW)7(N2SCt&yt3}1j@9~iy@;{-6y1!FfD zuLk3*VEhi02vCNDB7<@yC`W-Z8kBfY(m=6-k_k!{C^?`Mf>I3198l(gvH+ANpezMt zIVkm@bb+!S6amVqpj-gT7ErDMF9Y)xVD1C+HZVU4=8s_PC>U#pu`6NhdKkL_#vTV_Pk^x} z!`M?{>^U&@JQ#Z;jJ+Ag-UDOrgRw8b*cV~!%P{s882cKGeFMh61!LcVvG2jy4`8g? 
z&X+LuYluD)qO}mMhiDr_J0RK#(Mut^3Zkna`b3C63!=}4=p7J!Cq&;3(Jw*tTM+#Y zjEjMB$uKSz#x=t@Ka2~&xLaY|eK78R821{CdmqMq0^`1h@fsL!g7LX9egTYM4dW|e zd=-pe1LHj~z7@u=gYoNO{011m5yl@4<4=O|r@;8rVEh>{{wx^(0F3_}V#Y$uEQoPI zOdG_sLre$6bVAH}h}i%!XF|*c5OWd4Tm~^$KujOR+z2r@L(Clza~H(i12GRk%r1y| z7-C+4m=7UlAH@6uF~31<1jG)9SP5cBLhLArje=Mc#Ku8v0>n;)*c6C$LhKZX&4<|O z5IYlM&w8UI(!^LF_FMdmF^w4YBt^>`sV%6k?x+*iRt#JBa-OVt<6#pJ0Ln z6OMohI+$R92?|Uw!-Qy1l+aSI@BA;i@|oEPHSAg%-AIw7tL;*N*76Cv(2h&uz~&VsmeA?|#LyAa~8 zg1EaO?jeYK9^zhrxPL<2s}T1(#JvM???K!L5VsfNkA!$F#Oonmfp{~-Pl9+0#M>Y~ z9pW<~z8m6?h4`}|{%nZ95aKU}_$?5B8N^=+@!KK(8i>CR;%|ZYS0Mgdi2oT9hCxCk zBpd+=BOpP61T!Q|goG4Gm;?zHNU%XdIwWL5!gNTO2?>RePz(vBkT4q(=0buG5>A7J zTOi?8NcaO1V<9mG5+^}o8YE^xVh$vhLgH*loC}HbA+Zb+7eV3@NL&hu%OSBE5?zqE z2@+3)#FHWMMo7F167PY;S0M3SNPHg>e}^O)l8%HVJ0wkoq-l_}36f5Pq>~}(8c4bY zl5T^f=OO7$NO~KR-hrfdA!#2ZeFaIsK+7uf3?#n{$^U}n zUm*E6Nd6O2A|OQvDF#SUAjJ$R(U8&%DSk*f22up1oD3B)^lL37a;9LNP8L5UV*gNAngrE zdkfOufwcD^?E^^r2+}@*w9g>z3rO1sX1 zhJ!@{%MoB10hXh{G8!ynz@i0<9xO(%n7}d?EaSm40W1k%nFy9tuvow{87!G#$p%X< zSf+qw8dzq6r3fsgV3`A!`CwTHmL*_W29}jzsQ}9wu(-fd50*x-w1A}*EbU16f9SOWh+>&0?RdExgIPxg5?&l z+zytzz;Z8G?gz^*usi~m$HDRxSe^yTZm_%vmVbifHL$!1mUqDNK3F~i%co%Z0xVyG z2Ua6k&0rk|)>yE{gEa}PDPTrC?nF*41FG0&5Le>%iIo)@HDJ!RiC6 zAFQ2VT@ThCupSLo0oLQedJldJkB4g7rbLJ_**R!MYEuKY;ZouxY?H7Hs3dmIJowV4DfH2C(_Swiaw> zf$c)DT@1E6!1e&xc7g3Juzd`+PhqkKCXa>5<6v?=OfG@RvtY6tCbz-lc9?t#Ox^~Q zuY$?DVe)G*`3*>qh4d6ip9Ja4Abky_*FgGtkbW7YUjgY4L;5q2{v4$L0O@~1Mg(L` zgpA3MkpUUikkJGgEs$|GWLyjxTOi{O$anxUc0tCwkntI0d;yswAyW^TM##*9%zVh4 z4w=o6>4(e!WL^lFy^y&TG9QD?=OOb2$ovKD!@(|rJpt?%u-m{s7wqL=Uk3JdU_S=z z0_+!py%+3T!Ttc)p8)$)VE+c}zk>aD$eIXQlOZbuvgSk9GRRs1SzVA7gsfvB>l(2*@4|+3}E_2H94~o(I`WA$vJw2OxVBWFG_BS3vePkbNCw zKML8;LH2IQ{t0p-AZIw_OoW`tkdpy9iy&teCjz(}agX16IxC9)Rg5!2@+z*Zi!SM<>-UY||;P?%4 zCCEJja?>H#3As}sw;XaSA$JYr9s{{2L++`Ny909XgxtF!_jSnq0CGQq+&{s2BsfQc zGZ~!e;LHT)GH|W|XAL+vg7XA$o&?UT!Fe+{Zw2QY;QSDrA4A><$kRcd0rIjTZyMyy zfV@`7TL*dTA#V%hT?u*HA@6a>+YNbpAnz;4`w8-XfhiW4k_}TFFr^ZvxM4~oOt}!I 
z^um;_Fy%>@vInNT1XF&5sle1>Fx3K6vtg>zA^%v&KMnHFfc)Db|9;4S5b{5Q{4XK@YnZ0M zvc zuZ8KS!}RlD`UNokW|)2tOurANe*n|>!t^g;#u%7kh8fW?qY!4ygBc58Mmx;th8aCD z<1(1B9cEkuGoFGOFT#wMVaCrea~RBwgqgEp=3ewY&p zb4J6QF)(Kt%vl3-YGBR|m~$u0xf|wu4|D#2xxm~!m|F;Qi(zgT%nicaV`1)2nEM#a zeFEkk1@jCrPl0)5FmDCSTLtsBz`QGA-gcPx3C#N%=6wV6?J$2T%%2AHJ7E4sn13|P ze;nrThWUG7K{PB#gas2}!LhL5G+1y3EO-ePyafy1fwB}R%YZUFlx={rX#=*h_SlA2;{je|q3-5=8kHf+zVUZ3Nje|uou*e0AT40eE7TpVr9)U%V z!Qvxeu?80FVDU0oyapE6z~Y-=@!hcaURW|5mW+ZWQLv;OmQ=!$HL&D1Sh5qAJOJeq zlt)3i2Fe#f`6?){fbz{yei4*k0_7h-`Ccgh5|++@rL$q_Tv&Q6EIkdDo&if=hNW-A z(syCmcvv9E`h%csEdb71)vSbiBSe-D;_4$Jq# ziX2!m9ahYQ701Df(_zJ#u;K++@j9${6IPCcl}WHN8CJSsWgDz)hn3gE%G+V(ov`vp zSOu&a2CJsSs#&mV4y-yER-Fy2&Vf}g!>YGo)w{4d4OVBt>Ks@tVD+i6`gBiKoeg&1^K;_R+`5RRJ301?OY9v&pLscGBErY5xP*np} zbx_p+RUJ^(3022J)$veuB2=9WRi{GL=}>hhR9ym9mqOJQP}K)j+n{PYR9ypAw?Nfx zP<1<0-49g{Le5G57vLHJu8H8946Y1tWr529 zu36xk3$FR#S_rNs;OYg}wcxrFTz7-(L2x|;u1CQ2IJlkz*9+i!30$v$>o=$!2DPzJ zn*z0ypwc&8w1?sY)&H;5*P}cx;O;C3N)SU@+o1yMHsJjj7?tr@Qpze35`xEMC zLj4@5p9l4)Lj5^Ve;(A|1ob>OX<{&!PT% zsQ(G-e}(!#z#ReZNN{I^yBgeQf_o>p_d&x5Xi%WR3=QL;Ar>0qp&m=!M2^XzYQ;W1;bQXgmoT zPld*_q48X3yZ{<6hQ?lKd<+`jhsM3o_&qfK2#vo$@I-(o5t3&3+RcrFFc72w$lo~yuf z4S22x&yC=@1w6Nd=PvNv3!eMIvkN?rfah`WJO!R-!Lu7YFM{Wv;CT%^Z-VC?@VpP6 zkHGUOc)kG7m*Dvqc)kPAkKp+QJimh%@D2yB4Bipo9R=Po;MIZG2wpRI$ALE%yz$^o z0&fa<)4*#3Zw7d?!0P~S9(eP?I|ICh;4J~~Z1BzlZy9(OgLf%-SAcglc&or$1Kv9D zHh{Moyk7A7!0QKZCwSL`w+FmOgI9p}c<`PC-c!MQ26#7v_Z;w^58jKwy9KSXp4uoIncHQ+Ll6FC$t?6Z9!jmFd@I3~;=fU>^_Y zf{t6DV>fiX1|4re$IlQL27yQjBtpOnfyoe<4}oP6SOJ0c5D*YJ4g!}#;3^1Q4T0Sd zcntz?K;U=il%exT=uCyqOz6yl&MN3^fX*i9JQ_Mrg3eQ*^DgMz1)UE==SR@_C3Job z>vXVg9IT6hbq-iJ1J)J5x)xa10qZ(p-5Id%d{}oOth*7`-39CJfpsszy0>86JJ59m zbZMYV2VEBE%7!inbgh7{8tAHpt`ni_Ea*BLx^984d!cJ5biD#y??Tu6(DetbKLXZ| zfb}+5?|}7ASicn3SHb#fSRaJ-r@;EtVEwJI{ytcLKdgTb)_)G`_d@q5=r%&P3A#(5 zy9~M)L3aSUH$nF?(0w^{Uk%;YLigj)y&Jmsz=q+lVH9kLf(_ZQVH#|h0UO$3LpN;b zfepQ|;ab>mJ#2U$HoOWOUWcCH&@&2pqM*kKJq6HH1U+k@rxALZp=UGnTm(IrK+hx4 
z^DOi{4?VxY#^JD0f{hbl<7C*F0UKAt#yZ&OhK*;y#`9t0g|KlaYrQgpEJK z#-Cx+IM|c~o04IZ8#cAUrgqqLBW$`0Hr)f8K7>vCVAEG{v=)wzhNH*B(bM7owRE5D zK@do|hVP-10w^VsUXxHm2w2y)_U@|dT2@_q?`?GzP!yGB!%C3^r1vI>q9|fTC82le zkOT+`q|3QH=ggP)!*jjQ%s-g^7t#L``rk$Whw1+){okPfJMFm+s=Fhk5ByUiy-kR`AjfyxfeJ+wgKbUVfODpW)^I z^76O5{0lGt%Ah(7N@h@F23^UZe=+Dr2909S1O`oH&>svsz@S22>CG#r@yZ#zGKg1( z@yZBZ`Gr^3@X9(~P2tt{yxNghZ{yYbc=Z8Zoy@Cqcy%7H9_7_P8H~ZFGWc8upU2?; zG58e*4`%RJ4E}+^Kk-@uuQlbh=DcmzyneO}+p>$`b9uquYtF#INl-^uX189tri3mE%9FoZ@Y#k?k4Tk&!ns@=iwH&Bz&yT*$~p zj4Wkj4I_{9-ub+D1@B$Od!O*$WZs*~`}KLh3GbiC`w#N|Q@sBS?=R;4Z+L$>A0+cZ z3qDBYgPZx_EOw|c!l=s`brqwoVbm>*`VXV-WYj&3x}Q-G zF={lUzGu`iMt5QKKN2|%joYI z{S%{CGWvH$uVM6hMsH&DenwX_x`r|J7}Jz7%^A~@F>M&ro-th*lfjtojOojms~K}6 zV{T^5t&F*yF;6h&X~sOunCBVu0%Klf%sY%3$(T`$8OxX)#!O($G{($i%pAtdXUr1D ze8ZT%jIGVsWX85&Y${`0F}5vZyEC>YV^3%7nT$P$vF9=NLdIUg*vlDvBV+%~*xML; zJ7e!;?A?sLm$AbcJB6{oGxivtwBVC7_~d*(xsY)`GwxT$tzz72#;s%AM#gPn+;+z8 zWZWLc?Ppve;|?+I2;+`2uAFgIjQf+EAg2yF^~p&fCz+fk z$(cyb6mq7MGmD(L$o&_&H<5b_x&I;ePIB)d z_kMC8BKJ{ppCI>Xa{o*2|H$o6?n~spLhfti4k33qxo?sCF1hcM`w_XL$^C@fTyj4p zcQU!t$el^<9CGKA`x&{PllvvPUz7VSxhu&1k=$R%{f*o_a@UZ%p4?63ZYB2*a(9ut zm)ry77Lj|H+!At+ky}A-HMz$bUyJc|8Q*~MiHvW=_!P#U#P~B9e?H?cWc;m+znAg% zGky@`hcSKxwPE%b65Rs?DU1OzO_0o=m!o zN!KyydM3TZq#;Zi#-!OyTFj&+Oxnt%y-eEA+4Zyn`tTOliuL=1e)0DHk&3Vy4{1l!uw}C{x~N$|p?8 zVagXw`Hm^yGi5hZ4l?C1QyVcgg{dbo^;D*w%hdCjdOK4eVCq9m9nRGEnff79=Q4E( zQ@>>DZl)e&>S3mx$h1~WYs0iNnRX%5E@s*jO#2_xUS!(4OdHL#u}oXZw6#oI&$LRW z*JgTMrl&K#H`Dtt{W7Ls$Mox&K7i@3GkpltbD2Jk=`)zVoarl>{u|TxGyMqDOPSG{ z8C{r>&Wv-IaS1anW5z?wc$yi{GGi1oCNN_nGrniW@65Fmn|%^O<>!na7zG%<90bZp`Yztecp1C$sKm z)*H-vhgl<;HJ@2uGV3d5ZD!VPX6>_3#VooY^IxwdbbFN^{^~||}IZra@1?Kc;&Zo?o$(-5D*}$9~%-O}<#>`D+ zZcFB#&fN2udjWGFW$u5O`#f`VnLCZSGnkvl+)d2g!o1qdOJZIl=JjUYY0Nu=dCxO% z5c6JT-g4%xWZrMgt6+XD=GS3<8uK%mpT+!Zng4I*-^%>wnEx{KUt#`O=1*e&6z2cV z{0+?C#QbU&)MY_^7G$#ER2H1hf?HT{4-4*N!B7^AWWoC^_>2W#v)~&R>|wzn78J9v z9}CZ7;W;cE%EFN>e4mBuSh$^qJNT>@pPj;Ir}5cyeD*S*y~1aCe71?twy>xxi~6vr zFN(v$0iR#V 
z=U4OjhkTyH=i~YO4?aJ@=Y=fk!IFL~IfW%pvE)UT3}DHZELp*lANV4HFPid2bH2EO zFRtf{8~EZazW9hQM)AdJzSzPS+xW5_UuN)SH@>`!FCXU1NBMFAUoPd#uUQ%_O<-vf zOD|&S)hxY+rQ=vSg{9M2TFlZ)mR9rC-}&k?zPf_1M)B1IzM9BaJNT-AuZs9OjjuEL zI*YIG;_HX``cb}~%GdMwdI4XTvn*Ivn`LLP?0lA8$g($B_72NNvTQZWwy(!t!P;Z^QC- zEWelKkFop-mQQB+9G1`HyE=TA%y*6X?s~qvjqh&fyJ>tkpYIm(-EmelU_}Beu4Ki( zSaBmOCbMD=E9SAHlod6sIL`Nf=ljd}{tCVy!}k;Uelp)5=KBi1ui}Su_~8*x{$HDygIzRr-j~n=L6F>Ffr+)l&3O_x{Pygko z=lSUye)^f8R`PR0em;?(oAL9F{Co#L-^I`K`T0wJ{)%5(^Gg?gN#~b={PG6B4CR+! z`DGoyY+z+aR(5A)PgXw0%I8@5KUOYf<@c=okzX6|>k0hYlwYsm*PHqE7JmJRUvv3& z0>AF!*CKvB#BbgCtuMcw%x_QgTYr8V$Zy~9+t2*AlHXJKy*I_z$#j1-~bqTBPX4Mm{dWuy~v+5aE4Pw=+tUAo9 z5>_2!RRyc6S#_MeTIAIwuK{_9-WlYbMc%pOolo9H{hPeo$h(8QyUDwcya&m9guKVe zdy2eg$$Or>7s(q)-XQV@llKOB!^kTmuadkP@{wPg{Cea!BtMD##^j$!esl6u$!|q| zTk<=Q-o$#Uq=3wG>&X8X`8Sb& z3;F*c|4#DnA^(2zA0q!z@}D68Y4ZO|{{P7DPyS2fze4_NGJ zQ}QQ~Kb!oyc?5VoYkvXozLpE ztX|LRKUlq!HLX~a!J2NY>B*W*)||?k(^>NrYhGf_Al3|K&Fidrn>FvU<~P=CX3bXC z{K1;PT!pMT$l5g4X0x^*YfokE>8w4EwHL5Pg!`e?+JDIgpS^HPFinTSY z!@63mOJrR#>n>v5b*#IdbvLr^X4c)sx_ekRnsrlHH;r{OSvQ+?<*cvE`ueO-V0{wn zuVek~tiO}>_ptsx)<4erCs{v*^$S_Qi1kZY|0U~Ju>J=&G+;w>Hl(njB^z3^p$i+* z*>Ecx9%RGAYo@UFJZ25^Tzp&*u zwya{yMz(BbYX`RWVry@)ThC(aIc$A~t%KP5DqCM?>kziS%hvbU`V(8%v2_Dm z|MF~Q>t43*XIpQ!oyoSd+4gs~ozJ!_*mf1$2D9y5w!O!;583uH+s3o)Q?_kk+X1!} zvh5JtirH4lwraNbVf)!^KbP(2v;9K0U&Z!+uzd*IKVbVuY#+_`v235j_9^^Pn?IWH z$BF#WoIg_dqdk9gWdzc!?cD*fES9E7+0Gjy3F9&yJ1k*vXFF?9688Iqdv9J1=18MeMwqo!78) z0z2oha~?YvvU3qTm$CC(cGYHA6Ly`*uIB7YVOM*0b!68K?7Ev>_p<8&c0I(dr`h!^ zyS``FYIdz<*9LZNV%IKq?O}HYyZf>G6n3A^?!U470(M`-?!R~myN|NFoZXe|sm-3c z>^YM?m$K(__FToDf3W8!_WYYYBiWO~p7HFN$ezjUnZurW>^a8X+U%{%-UjSVU~g0Q zHfQgp?EM#eZ)EQs?7f@4_p$du_CCViXW9EadtYSlK=uw|?^yPJ$KHJQR<{+WVSj!0C$PUM`&+R8B=)yve>?VPu)iPs&td-+?7xcrH?aR^_TS3> z+u45?`yXciW9)yD{m-!fIrfia{}T59$o^vXS8|{}2TtHXTMo46Kvxd*;6Nq^`f#8h z2hQTaxg0p30~c}NQVv|pfm=B600$o8z|$P~F9-g|f&LtLi33A8Fq{K#ao}AJyw8C- z99YkRJrp#eAccbV6l74)kAhPuIERAsD7cVnOa1!UrgP 
zio%yD97N$T3g4vg9SYy0@Iwl7D4amyBnqceID^846n;hF4-~GUa2GZ2Srql9=oE_1q3Aq{E~MxZiY}+K2?Hsy`L-%s%0S>*&q3Im@oI^V}RKTGM4%gxE z2^?<9;no~($Kj3~?!w^=4rg(=FNaUz@aY^rlfyS~cmRiob9e!VmvZ=L4zJC>HmneCKlGi91LdkGS-lyafN+wgXfRfKB`I?e%DOo|ukCgmE$r?)5Q?iMYt(5#h z$pK1AD5;?|fzl*On^T%fX)8+GQrdyiZj|<?z7# zq--!{?^5<2Wn(GJp=<(WlPH@?**wY?Qnr|~FDUzpvL7g0McGEm_ENT=vLloorL3H? zD$4$(ygua#lqXZ(gz~18x28OU@+`{FqWm1nFQNQ$%CDmQ8p{7k`K^@SPWfGw-%I%e zls`%NK+1Piv;sh$1QISGLODft>kw!%}6=zX#85LJhaXl3`Qt@vpZlmH3DjuZb5h@<1 z;wdVgrD7lz!>D+lit$u@O2ten=1?)8iqEL{oQiL$SV6^)RQy84Z&a+OVmB2Bsi>l& zhRS+WHl#9%%EnZlNM$Q3+fvzq%Fa}#Q`wu!)2Te4%70LKEtUVK@-`~(pz>}i@1yc@ zDxaeASt_5W@O@rJF;zrs_Vb z9;E6is-C6ld8%HdY9Li_Q1vcVW2l-+)pV*BP_>AvB~&e?Y8h2OQMHn)->J%{YAsdU zs4ArD7}d3@u1j?zs!yQ08PzFNx1_og)oE0BrMd^znN*)j^@UVlOZ7jgzLo0RslJQq zd#Qea>L;mwhU(|2eu3%%RKHI3dsKfy^>nIdQvDg#pHuxM)n8NnE!DqJ{TtPJRIj0W zJ=K3uT|{*`HFc?}PfZhQno`q(nvLvvpN1Z zj-SQx|8e|vjt}AZaE`yp@eeuvv5+QY3;l#sg)@Y+gmZ;kgnNYhga?I3gaN{b!YE;^ zkRyy2ktyO-5$B5dkBECk3>Gn3#1s+JM9dU1N5p&)pNYs9v021c5r2r-CE}=vauHP` z{uH%{TCa#&!$qx+MXk}I)+eG?uBi2?s5M#CS}tm>6t#X6wem!*HKNuYQER`bRVZp5 z619p&?OvkxDWdlIqV|QN_9deB<)ZdgqV_eS_9LS9v!eEMqV@}-_5e})Em8YjQTu&S z`y)|%l&F&~>huf9#k+#%}RE$ZAS>O3gwJR<5mF6ulb>O3pzJTL0J zDC!Ipbq0w#gGHV3qRvE7XNssZUDTN+>dX~&7Kl2FM4cs~&QeilnW(c|)cIc2`AO7S zDe7z#bvBDS+eDo|MBNsmZYNQR9+>UI-#ZxeMN5_KODbsraXpA>Z`i@FO$-Oohb z&qduYMBQRh_fJues8?Imt1IfAF6vz*>Rlr0T`uZfDe4Up_1+To-VycQ6ZJk2^?nic zHi&wgM7^z|-gZ&Hxv1Y!)bA|nr;GYsMg3bv{Rc(;heiFzMExg3{Yj$!d{KX)sJ~d$ zUn1%s7WHdH{o|rREzzKkXmFZnaG_{$v1o9aXmEvS@Um#|rfBfCXfRSVcwaR5Su|KL z8f+8|wulDXMT4E9!5-1DmS~tL8YYW|jYPv%qG21+u)S#5Ni<9o4ZDhl7l?*eiiTH< zhS!RQ*NcV^iiZ6~!`DT_A)?_>(Qt%l__k;`QZ&pF4Ht-pKZu4ui-s#j!?mK}4$*M0 zXjmc|R*Qt@BB8ZN=qC~`6A3qngquXdEh6DRBH>PvaF0lMR3tnu5}pzX&x(X$B4LC` zcvB?2BNE;d2_K4tPesCHkuX&xd?peWi-a#k!dD{U8vT@mZ1hyhwaeBn}jbgGAymkvKvmzAX|*io_`* zahgb+E)r*n#JM7Ifk^yDBz`9nzZZ$CMdDhKxIrXt7Kz(L;tr8mAQFp2;$e|kB9a=2 
zqy&+aD3Th9q!UC^Gm+FmBxQ@FlSR^LBIyi~biPQsTqIp3lCBX+H;bhEMbcv;=~qYX7BKc{N{IW=XMI^r_l81=o;Uf7hkvv8ue3Bt`v=~5{<4Ajs7Vb-5?s>EgIb`8a*Hy zJuDhMCK^2{8oeYM4Hb<(6pcO>jXn{Laz&$0MWd;r(G1aOwrI3mG}lac9vuT{P|`8fS^deMRF_MB@iU<5xxFv7+%L(RhkzJXbVcAQ~?cjhBeV zOGV?KMdOvC@oLd{ooKvKG~OZ_?-GsoipB>-<08?dg=liVX!4k7@|2LxCNo5n*`mo}(c}x!WQAywFPf|oO*V@r+eDKcqRDR2 zq);?DB$^x%O^%8te~J_8iW3@%6HX9K9~Vvgi>3ob)8V4&TcYW^qUrmh=|`gJXwmc& z(R8|KI!iR2Bbv?^O+OP&7mKD#MAPk}SzXaAQ#89#G`mAIdr&lcQZ#!(G#enAy)K%) zDVmKG%_fOvQ$(}5qS<`W>@(49v1qnJH2Xm`TPK=r6wS7XW`DVNie`I6v;Cr3p=eem zn*Ax7hiG0$G_Nn3Cy3_BqInb1yp3qyPBiZ*ns*k>9}vx-70sU$&0i4B`-|qwMe|jn zdA?}ARy1EPS~L?aI*1mXM2j@hB15#eO|*DOw0K0ccwDr2QnZ*ST6`^9d?Q+XCt7?j zQj$cRwq%0RHt3*n^NLepZ zHi?w2B4wvY*&|Z+iWPyYh?C9{CtV>$>?T^CB3hm%TK-M6JWI5E zPP7~>TD~q?4iznjiqSYCq)mfs| zxuVtiqSZyB)up1<&7#$rwEoM#SF|n>ZR&|O%|x5?MVo(#HiJc*k3^ffqRkhg&5xqZ&!Wv{(Po=y zvqQAmE!yl8Z3;x2gQ88bXj3ZM))j3Vh_;EMZL(WpCep4DX*Y5KBJFmOc9%%ISEM~5(w-7&&x*9?McNA@ZGcF7TcnK=Y2!uO zOp*4zNLww^){3;h_HPzx+eF$QBJEF+o+#3jMS2sF-c+Qw5b3ERy@yCYO{AY8($5m< z=Zf_6Mfyb|{c4eZjY$8ONWV#>-y+f<5a|zz^v6Z|QzHFYk^X{6A0X0S7U^$^^p8aP zM3FvOq|XrPvqkzmk-kWzFA?cWMfy)7Jzu167wJ1h`c9F)OQaWz^b(PNOr%$c^lFiQ zTx2v583`hzvB)@4WHc8U9YscGkYPP<71IAT4a18GIB-6ry^sr$e1HC=823&B4dfjSSm7p z78xr=MxMx6BQn;Dj4dK#yU5rnG73dTg~+H9U5T!>Mb~bmdqmg!Mc0Q!*GEOyCq&n$Mc4m| zuKyEVM~SXWMb|vh^@!+pg6MX#=ys#%_N?gkis&|2bbC*9`%rWnCAy6j-Eu^?38LF1 z(QT^ewpeugLUj8|bo)khTQ0h-5Z!(h-F_C`wu8$^!^(X+kinIU>+iJoVPp6805mx`YM6g_Ve zJ?|Dh9}ztt7d@XBJx7S1?}?sMM9=x6=Q`1Im*}}i^!&?LEqWdoy=sYGbw#fRqF18m z)kyR@LG)@SdZmkA-9)dRqE~OxD_itBS@b$h^!l6Vb++jBchTz>(d!}6YoO>gQuG=x zdMy#XeiOY4L?)4$A~M^H%uXV+v&ifzGEWzo=ZegWMCR2Z^IDO4oyfdJWZot+?+}@H zi_Awv=3^rBNs;-C$Q&dxUlo}{MCNdj`Ig9hPh@^5GDnHbu_7}^WKI*AGeqV>k-1o8 zel9Y<6`3nU=Jz6Vwa8p6GPj7#?ILrh$lN0`i$vyOk$FU99u>Wth~DX<_eG-jb)xt6 zqW5j0_Z_15-JqW6QM_fw+xv!eI&qW6oU_dwD61JU~{(fe1?d%NgeBzhkby^o0A zf4R#=?<$d%EV5dPtkxo{jmT;%vigavzl*H%Mbr#<*g~+;EWZfdNZWCGm5m~p3 ztVcxFVcO>rc_Ax#-hZ^tnOwc}?^gEBZ_meP)P0vqhhIqR&FnXR+w>v*@#4^w}u-Y!Q97 
zi#|I=pFN__VbP~X^f@lFYl-Z-BD;aeP88XxBDeWi|kG!J56M#i|oE4`(%-Q zn#lf}$Ua+S|6OEXC9?k^vab`_{}S0ZiR@cM_5&jOA(8#0$bLp-KPR$Z5ZQx7_F$3y zhR7Z!vfmTgxgvXl$etv!r;6+uB73&T{!(OrC9=O3*~>)suOj<*k-bi2Zxq>EMD})( zy;o!(5ZOf{`>^QSLi9ab^u0^;eO&Z?QuKXB^nFhBeL?gcAo>myeMgACZ;HNeiN2FW z-}$2NLeY1z==+7}`<3YXjp+Nc=(|$%T`l^q6MZ*|zFS1!1EO!C=vyrMmWsY*qHne6 zdtCIZCHgfN{o0Ct-9^8iqF*1;ub=35s_1v7=y#6jcb@3?57F-?(eGZ-?|#wm5z+5) z(eEkI?>W)$1<`MS==YB3H%jzdAo?v8{k|6cz7_peh<@uuzb&HQ&j0(5L!DaxFH`6I Lzx@9b{dWH!FVcj9 diff --git a/main.cpp b/main.cpp index ffdcb19503..e4c8a2b464 100644 --- a/main.cpp +++ b/main.cpp @@ -487,8 +487,10 @@ void update_pos(float frametime) balls.updateHand(myHead.getPos() + myHand.getPos(), glm::vec3(0,0,0), myHand.getRadius()); // Update all this stuff to any agents that are nearby and need to see it! - char test[] = "BXXX"; - broadcast(UDP_socket, test, strlen(test)); + const int MAX_BROADCAST_STRING = 200; + char broadcast_string[MAX_BROADCAST_STRING]; + int broadcast_bytes = myHead.getBroadcastData(broadcast_string); + broadcast_to_agents(UDP_socket, broadcast_string, broadcast_bytes); } void display(void) @@ -542,7 +544,17 @@ void display(void) // Show field vectors if (display_field) field_render(); - if (display_head) myHead.render(); + // Render my own head + if (display_head) { + glPushMatrix(); + glLoadIdentity(); + glTranslatef(0.f, 0.f, -7.f); + myHead.render(); + glPopMatrix(); + } + + // Render heads of other agents + if (!display_head) render_agents(); if (display_hand) myHand.render(); @@ -550,10 +562,10 @@ void display(void) if (!display_head) balls.render(); // Render the world box - if (!display_head) render_world_box(); + if (!display_head && stats_on) render_world_box(); - glm::vec3 test(0.5, 0.5, 0.5); - render_vector(&test); + //glm::vec3 test(0.5, 0.5, 0.5); + //render_vector(&test); glPopMatrix(); @@ -581,7 +593,7 @@ void display(void) sprintf(val, "%d,%d", target_x, target_y); drawtext(target_x, target_y-20, 0.08, 0, 1.0, 0, val, 0, 1, 0); } - if 
(display_head_mouse && !display_head) + if (display_head_mouse && !display_head && stats_on) { glPointSize(10.0f); glColor4f(1.0, 1.0, 0.0, 0.8); @@ -717,7 +729,8 @@ void key(unsigned char k, int x, int y) void read_network() { // Receive packets - int bytes_recvd = network_receive(UDP_socket, incoming_packet, delay); + in_addr from_addr; + int bytes_recvd = network_receive(UDP_socket, &from_addr, incoming_packet, delay); if (bytes_recvd > 0) { packetcount++; @@ -742,11 +755,11 @@ void read_network() // Message from Spaceserver // update_agents(&incoming_packet[1], bytes_recvd - 1); - } else if (incoming_packet[0] == 'B') { + } else if (incoming_packet[0] == 'H') { // // Broadcast packet from another agent // - //std::cout << "Got broadcast from agent\n"; + update_agent(from_addr, &incoming_packet[1], bytes_recvd - 1); } } } @@ -850,10 +863,11 @@ int main(int argc, char** argv) incoming_packet = new char[MAX_PACKET_SIZE]; // Test network loopback + in_addr from_addr; char test_data[] = "Test!"; int bytes_sent = network_send(UDP_socket, test_data, 5); if (bytes_sent) printf("%d bytes sent.", bytes_sent); - int test_recv = network_receive(UDP_socket, incoming_packet, delay); + int test_recv = network_receive(UDP_socket, &from_addr, incoming_packet, delay); printf("Received %i bytes\n", test_recv); // Load textures diff --git a/network.cpp b/network.cpp index 7cc72a53a4..953d7e1832 100644 --- a/network.cpp +++ b/network.cpp @@ -116,7 +116,7 @@ int network_send(int handle, char * packet_data, int packet_size) return sent_bytes; } -int network_receive(int handle, char * packet_data, int delay /*msecs*/) +int network_receive(int handle, in_addr * from_addr, char * packet_data, int delay /*msecs*/) { int received_bytes = recvfrom(handle, (char*)packet_data, MAX_PACKET_SIZE, 0, (sockaddr*)&dest_address, &fromLength ); @@ -149,7 +149,4 @@ int network_receive(int handle, char * packet_data, int delay /*msecs*/) } } - - - } diff --git a/network.h b/network.h index 
ea3f98cd72..1e51419005 100644 --- a/network.h +++ b/network.h @@ -22,7 +22,7 @@ const int UDP_PORT = 30001; const char DESTINATION_IP[] = "127.0.0.1"; // Address and port of spaceserver process to advertise other agents -const char SPACESERVER_IP[] = "127.0.0.1"; +const char SPACESERVER_IP[] = "192.168.1.16"; const int SPACESERVER_PORT = 40000; // Randomly send a ping packet every N packets sent @@ -30,7 +30,7 @@ const int PING_PACKET_COUNT = 20; int network_init(); int network_send(int handle, char * packet_data, int packet_size); -int network_receive(int handle, char * packet_data, int delay /*msecs*/); +int network_receive(int handle, in_addr * from_addr, char * packet_data, int delay /*msecs*/); timeval network_send_ping(int handle); int notify_spaceserver(int handle, float x, float y, float z); From b6919755c292782406240128a409b4acf7c30447 Mon Sep 17 00:00:00 2001 From: Kenneth Keiter Date: Thu, 13 Dec 2012 13:08:04 -0800 Subject: [PATCH 027/136] Roll-up commit of marker-tracking. * Added OpenCV and CVBlob framework. * Added marker acquisition and tracking. Note: OpenCV libs are compiled for Apple x64 arch. This is for convenience. 
--- CVBlob.framework/CVBlob | 1 + CVBlob.framework/Headers | 1 + CVBlob.framework/Versions/A/CVBlob | Bin 0 -> 278296 bytes .../Versions/A/Headers/BlobContour.h | 99 + .../A/Headers/BlobLibraryConfiguration.h | 22 + .../Versions/A/Headers/BlobOperators.h | 754 + .../Versions/A/Headers/BlobProperties.h | 70 + .../Versions/A/Headers/BlobResult.h | 171 + .../Versions/A/Headers/ComponentLabeling.h | 30 + CVBlob.framework/Versions/A/Headers/blob.h | 172 + CVBlob.framework/Versions/Current | 1 + OpenCV/Headers/calib3d/calib3d.hpp | 751 + OpenCV/Headers/contrib/contrib.hpp | 986 + .../contrib/detection_based_tracker.hpp | 106 + OpenCV/Headers/contrib/hybridtracker.hpp | 220 + OpenCV/Headers/contrib/openfabmap.hpp | 405 + OpenCV/Headers/contrib/retina.hpp | 356 + OpenCV/Headers/core/core.hpp | 4738 ++++ OpenCV/Headers/core/core_c.h | 1885 ++ OpenCV/Headers/core/cuda_devptrs.hpp | 185 + OpenCV/Headers/core/devmem2d.hpp | 43 + OpenCV/Headers/core/eigen.hpp | 281 + OpenCV/Headers/core/gpumat.hpp | 577 + OpenCV/Headers/core/internal.hpp | 788 + OpenCV/Headers/core/mat.hpp | 2605 ++ OpenCV/Headers/core/opengl_interop.hpp | 335 + OpenCV/Headers/core/operations.hpp | 3977 +++ OpenCV/Headers/core/types_c.h | 1901 ++ OpenCV/Headers/core/version.hpp | 58 + OpenCV/Headers/core/wimage.hpp | 621 + OpenCV/Headers/features2d/features2d.hpp | 1606 ++ OpenCV/Headers/flann/all_indices.h | 155 + OpenCV/Headers/flann/allocator.h | 188 + OpenCV/Headers/flann/any.h | 305 + OpenCV/Headers/flann/autotuned_index.h | 583 + OpenCV/Headers/flann/composite_index.h | 194 + OpenCV/Headers/flann/config.h | 38 + OpenCV/Headers/flann/defines.h | 176 + OpenCV/Headers/flann/dist.h | 814 + OpenCV/Headers/flann/dummy.h | 16 + OpenCV/Headers/flann/dynamic_bitset.h | 159 + OpenCV/Headers/flann/flann.hpp | 427 + OpenCV/Headers/flann/flann_base.hpp | 291 + OpenCV/Headers/flann/general.h | 52 + OpenCV/Headers/flann/ground_truth.h | 95 + OpenCV/Headers/flann/hdf5.h | 231 + OpenCV/Headers/flann/heap.h | 165 + 
.../flann/hierarchical_clustering_index.h | 717 + OpenCV/Headers/flann/index_testing.h | 318 + OpenCV/Headers/flann/kdtree_index.h | 621 + OpenCV/Headers/flann/kdtree_single_index.h | 634 + OpenCV/Headers/flann/kmeans_index.h | 1114 + OpenCV/Headers/flann/linear_index.h | 132 + OpenCV/Headers/flann/logger.h | 130 + OpenCV/Headers/flann/lsh_index.h | 392 + OpenCV/Headers/flann/lsh_table.h | 482 + OpenCV/Headers/flann/matrix.h | 116 + OpenCV/Headers/flann/miniflann.hpp | 162 + OpenCV/Headers/flann/nn_index.h | 179 + OpenCV/Headers/flann/object_factory.h | 91 + OpenCV/Headers/flann/params.h | 96 + OpenCV/Headers/flann/random.h | 135 + OpenCV/Headers/flann/result_set.h | 543 + OpenCV/Headers/flann/sampling.h | 81 + OpenCV/Headers/flann/saving.h | 187 + OpenCV/Headers/flann/simplex_downhill.h | 186 + OpenCV/Headers/flann/timer.h | 93 + .../Headers/gpu/device/border_interpolate.hpp | 714 + OpenCV/Headers/gpu/device/color.hpp | 221 + OpenCV/Headers/gpu/device/common.hpp | 114 + OpenCV/Headers/gpu/device/datamov_utils.hpp | 105 + .../gpu/device/detail/color_detail.hpp | 1542 ++ .../gpu/device/detail/reduction_detail.hpp | 841 + .../gpu/device/detail/transform_detail.hpp | 395 + .../gpu/device/detail/type_traits_detail.hpp | 187 + .../gpu/device/detail/vec_distance_detail.hpp | 117 + OpenCV/Headers/gpu/device/dynamic_smem.hpp | 80 + OpenCV/Headers/gpu/device/emulation.hpp | 139 + OpenCV/Headers/gpu/device/filters.hpp | 278 + OpenCV/Headers/gpu/device/funcattrib.hpp | 72 + OpenCV/Headers/gpu/device/functional.hpp | 686 + OpenCV/Headers/gpu/device/limits.hpp | 235 + OpenCV/Headers/gpu/device/saturate_cast.hpp | 216 + OpenCV/Headers/gpu/device/scan.hpp | 171 + OpenCV/Headers/gpu/device/static_check.hpp | 67 + OpenCV/Headers/gpu/device/transform.hpp | 67 + OpenCV/Headers/gpu/device/type_traits.hpp | 82 + OpenCV/Headers/gpu/device/utility.hpp | 237 + OpenCV/Headers/gpu/device/vec_distance.hpp | 224 + OpenCV/Headers/gpu/device/vec_math.hpp | 330 + 
OpenCV/Headers/gpu/device/vec_traits.hpp | 280 + OpenCV/Headers/gpu/device/warp.hpp | 112 + OpenCV/Headers/gpu/device/warp_reduce.hpp | 69 + OpenCV/Headers/gpu/devmem2d.hpp | 43 + OpenCV/Headers/gpu/gpu.hpp | 2532 ++ OpenCV/Headers/gpu/gpumat.hpp | 43 + OpenCV/Headers/gpu/stream_accessor.hpp | 64 + OpenCV/Headers/highgui/cap_ios.h | 163 + OpenCV/Headers/highgui/highgui.hpp | 253 + OpenCV/Headers/highgui/highgui_c.h | 619 + OpenCV/Headers/imgproc/imgproc.hpp | 1270 + OpenCV/Headers/imgproc/imgproc_c.h | 623 + OpenCV/Headers/imgproc/types_c.h | 626 + OpenCV/Headers/legacy/blobtrack.hpp | 948 + OpenCV/Headers/legacy/compat.hpp | 740 + OpenCV/Headers/legacy/legacy.hpp | 3436 +++ OpenCV/Headers/legacy/streams.hpp | 93 + OpenCV/Headers/ml/ml.hpp | 2133 ++ OpenCV/Headers/nonfree/features2d.hpp | 155 + OpenCV/Headers/nonfree/nonfree.hpp | 57 + OpenCV/Headers/objdetect/objdetect.hpp | 1055 + OpenCV/Headers/opencv.hpp | 61 + OpenCV/Headers/opencv_modules.hpp | 27 + OpenCV/Headers/photo/photo.hpp | 91 + OpenCV/Headers/photo/photo_c.h | 69 + OpenCV/Headers/stitching/detail/autocalib.hpp | 65 + OpenCV/Headers/stitching/detail/blenders.hpp | 137 + OpenCV/Headers/stitching/detail/camera.hpp | 69 + .../stitching/detail/exposure_compensate.hpp | 106 + OpenCV/Headers/stitching/detail/matchers.hpp | 188 + .../stitching/detail/motion_estimators.hpp | 205 + .../Headers/stitching/detail/seam_finders.hpp | 259 + OpenCV/Headers/stitching/detail/util.hpp | 162 + OpenCV/Headers/stitching/detail/util_inl.hpp | 127 + OpenCV/Headers/stitching/detail/warpers.hpp | 515 + .../Headers/stitching/detail/warpers_inl.hpp | 765 + OpenCV/Headers/stitching/stitcher.hpp | 174 + OpenCV/Headers/stitching/warpers.hpp | 172 + OpenCV/Headers/ts/ts.hpp | 583 + OpenCV/Headers/ts/ts_gtest.h | 20133 ++++++++++++++++ OpenCV/Headers/ts/ts_perf.hpp | 532 + OpenCV/Headers/video/background_segm.hpp | 262 + OpenCV/Headers/video/tracking.hpp | 359 + OpenCV/Headers/video/video.hpp | 58 + 
OpenCV/Headers/videostab/deblurring.hpp | 110 + OpenCV/Headers/videostab/fast_marching.hpp | 103 + .../Headers/videostab/fast_marching_inl.hpp | 166 + OpenCV/Headers/videostab/frame_source.hpp | 91 + OpenCV/Headers/videostab/global_motion.hpp | 141 + OpenCV/Headers/videostab/inpainting.hpp | 200 + OpenCV/Headers/videostab/log.hpp | 75 + .../Headers/videostab/motion_stabilizing.hpp | 106 + OpenCV/Headers/videostab/optical_flow.hpp | 120 + OpenCV/Headers/videostab/stabilizer.hpp | 187 + OpenCV/Headers/videostab/videostab.hpp | 48 + .../Libraries/libopencv_calib3d.2.4.3.dylib | Bin 0 -> 669000 bytes .../Libraries/libopencv_contrib.2.4.3.dylib | Bin 0 -> 1101820 bytes OpenCV/Libraries/libopencv_core.2.4.3.dylib | Bin 0 -> 2198644 bytes .../libopencv_features2d.2.4.3.dylib | Bin 0 -> 761624 bytes OpenCV/Libraries/libopencv_flann.2.4.3.dylib | Bin 0 -> 544152 bytes OpenCV/Libraries/libopencv_gpu.2.4.3.dylib | Bin 0 -> 164112 bytes .../Libraries/libopencv_highgui.2.4.3.dylib | Bin 0 -> 1317148 bytes .../Libraries/libopencv_imgproc.2.4.3.dylib | Bin 0 -> 2155152 bytes OpenCV/Libraries/libopencv_legacy.2.4.3.dylib | Bin 0 -> 1135380 bytes OpenCV/Libraries/libopencv_ml.2.4.3.dylib | Bin 0 -> 480272 bytes .../Libraries/libopencv_nonfree.2.4.3.dylib | Bin 0 -> 116400 bytes .../Libraries/libopencv_objdetect.2.4.3.dylib | Bin 0 -> 572376 bytes OpenCV/Libraries/libopencv_photo.2.4.3.dylib | Bin 0 -> 121892 bytes .../Libraries/libopencv_stitching.2.4.3.dylib | Bin 0 -> 575788 bytes OpenCV/Libraries/libopencv_ts.2.4.3.dylib | Bin 0 -> 681940 bytes OpenCV/Libraries/libopencv_video.2.4.3.dylib | Bin 0 -> 255056 bytes .../Libraries/libopencv_videostab.2.4.3.dylib | Bin 0 -> 241808 bytes SerialInterface.h | 2 +- head.cpp | 8 + head.h | 2 + interface.xcodeproj/project.pbxproj | 595 +- main.cpp | 116 +- marker_acquisition_view.cpp | 81 + marker_acquisition_view.h | 41 + markers.cpp | 261 + markers.h | 84 + 171 files changed, 84111 insertions(+), 22 deletions(-) create mode 120000 
CVBlob.framework/CVBlob create mode 120000 CVBlob.framework/Headers create mode 100644 CVBlob.framework/Versions/A/CVBlob create mode 100644 CVBlob.framework/Versions/A/Headers/BlobContour.h create mode 100644 CVBlob.framework/Versions/A/Headers/BlobLibraryConfiguration.h create mode 100644 CVBlob.framework/Versions/A/Headers/BlobOperators.h create mode 100644 CVBlob.framework/Versions/A/Headers/BlobProperties.h create mode 100644 CVBlob.framework/Versions/A/Headers/BlobResult.h create mode 100644 CVBlob.framework/Versions/A/Headers/ComponentLabeling.h create mode 100644 CVBlob.framework/Versions/A/Headers/blob.h create mode 120000 CVBlob.framework/Versions/Current create mode 100644 OpenCV/Headers/calib3d/calib3d.hpp create mode 100644 OpenCV/Headers/contrib/contrib.hpp create mode 100644 OpenCV/Headers/contrib/detection_based_tracker.hpp create mode 100644 OpenCV/Headers/contrib/hybridtracker.hpp create mode 100644 OpenCV/Headers/contrib/openfabmap.hpp create mode 100644 OpenCV/Headers/contrib/retina.hpp create mode 100644 OpenCV/Headers/core/core.hpp create mode 100644 OpenCV/Headers/core/core_c.h create mode 100644 OpenCV/Headers/core/cuda_devptrs.hpp create mode 100644 OpenCV/Headers/core/devmem2d.hpp create mode 100644 OpenCV/Headers/core/eigen.hpp create mode 100644 OpenCV/Headers/core/gpumat.hpp create mode 100644 OpenCV/Headers/core/internal.hpp create mode 100644 OpenCV/Headers/core/mat.hpp create mode 100644 OpenCV/Headers/core/opengl_interop.hpp create mode 100644 OpenCV/Headers/core/operations.hpp create mode 100644 OpenCV/Headers/core/types_c.h create mode 100644 OpenCV/Headers/core/version.hpp create mode 100644 OpenCV/Headers/core/wimage.hpp create mode 100644 OpenCV/Headers/features2d/features2d.hpp create mode 100644 OpenCV/Headers/flann/all_indices.h create mode 100644 OpenCV/Headers/flann/allocator.h create mode 100644 OpenCV/Headers/flann/any.h create mode 100644 OpenCV/Headers/flann/autotuned_index.h create mode 100644 
OpenCV/Headers/flann/composite_index.h create mode 100644 OpenCV/Headers/flann/config.h create mode 100644 OpenCV/Headers/flann/defines.h create mode 100644 OpenCV/Headers/flann/dist.h create mode 100644 OpenCV/Headers/flann/dummy.h create mode 100644 OpenCV/Headers/flann/dynamic_bitset.h create mode 100644 OpenCV/Headers/flann/flann.hpp create mode 100644 OpenCV/Headers/flann/flann_base.hpp create mode 100644 OpenCV/Headers/flann/general.h create mode 100644 OpenCV/Headers/flann/ground_truth.h create mode 100644 OpenCV/Headers/flann/hdf5.h create mode 100644 OpenCV/Headers/flann/heap.h create mode 100644 OpenCV/Headers/flann/hierarchical_clustering_index.h create mode 100644 OpenCV/Headers/flann/index_testing.h create mode 100644 OpenCV/Headers/flann/kdtree_index.h create mode 100644 OpenCV/Headers/flann/kdtree_single_index.h create mode 100644 OpenCV/Headers/flann/kmeans_index.h create mode 100644 OpenCV/Headers/flann/linear_index.h create mode 100644 OpenCV/Headers/flann/logger.h create mode 100644 OpenCV/Headers/flann/lsh_index.h create mode 100644 OpenCV/Headers/flann/lsh_table.h create mode 100644 OpenCV/Headers/flann/matrix.h create mode 100644 OpenCV/Headers/flann/miniflann.hpp create mode 100644 OpenCV/Headers/flann/nn_index.h create mode 100644 OpenCV/Headers/flann/object_factory.h create mode 100644 OpenCV/Headers/flann/params.h create mode 100644 OpenCV/Headers/flann/random.h create mode 100644 OpenCV/Headers/flann/result_set.h create mode 100644 OpenCV/Headers/flann/sampling.h create mode 100644 OpenCV/Headers/flann/saving.h create mode 100644 OpenCV/Headers/flann/simplex_downhill.h create mode 100644 OpenCV/Headers/flann/timer.h create mode 100644 OpenCV/Headers/gpu/device/border_interpolate.hpp create mode 100644 OpenCV/Headers/gpu/device/color.hpp create mode 100644 OpenCV/Headers/gpu/device/common.hpp create mode 100644 OpenCV/Headers/gpu/device/datamov_utils.hpp create mode 100644 OpenCV/Headers/gpu/device/detail/color_detail.hpp create mode 
100644 OpenCV/Headers/gpu/device/detail/reduction_detail.hpp create mode 100644 OpenCV/Headers/gpu/device/detail/transform_detail.hpp create mode 100644 OpenCV/Headers/gpu/device/detail/type_traits_detail.hpp create mode 100644 OpenCV/Headers/gpu/device/detail/vec_distance_detail.hpp create mode 100644 OpenCV/Headers/gpu/device/dynamic_smem.hpp create mode 100644 OpenCV/Headers/gpu/device/emulation.hpp create mode 100644 OpenCV/Headers/gpu/device/filters.hpp create mode 100644 OpenCV/Headers/gpu/device/funcattrib.hpp create mode 100644 OpenCV/Headers/gpu/device/functional.hpp create mode 100644 OpenCV/Headers/gpu/device/limits.hpp create mode 100644 OpenCV/Headers/gpu/device/saturate_cast.hpp create mode 100644 OpenCV/Headers/gpu/device/scan.hpp create mode 100644 OpenCV/Headers/gpu/device/static_check.hpp create mode 100644 OpenCV/Headers/gpu/device/transform.hpp create mode 100644 OpenCV/Headers/gpu/device/type_traits.hpp create mode 100644 OpenCV/Headers/gpu/device/utility.hpp create mode 100644 OpenCV/Headers/gpu/device/vec_distance.hpp create mode 100644 OpenCV/Headers/gpu/device/vec_math.hpp create mode 100644 OpenCV/Headers/gpu/device/vec_traits.hpp create mode 100644 OpenCV/Headers/gpu/device/warp.hpp create mode 100644 OpenCV/Headers/gpu/device/warp_reduce.hpp create mode 100644 OpenCV/Headers/gpu/devmem2d.hpp create mode 100644 OpenCV/Headers/gpu/gpu.hpp create mode 100644 OpenCV/Headers/gpu/gpumat.hpp create mode 100644 OpenCV/Headers/gpu/stream_accessor.hpp create mode 100644 OpenCV/Headers/highgui/cap_ios.h create mode 100644 OpenCV/Headers/highgui/highgui.hpp create mode 100644 OpenCV/Headers/highgui/highgui_c.h create mode 100644 OpenCV/Headers/imgproc/imgproc.hpp create mode 100644 OpenCV/Headers/imgproc/imgproc_c.h create mode 100644 OpenCV/Headers/imgproc/types_c.h create mode 100644 OpenCV/Headers/legacy/blobtrack.hpp create mode 100644 OpenCV/Headers/legacy/compat.hpp create mode 100644 OpenCV/Headers/legacy/legacy.hpp create mode 100644 
OpenCV/Headers/legacy/streams.hpp create mode 100644 OpenCV/Headers/ml/ml.hpp create mode 100644 OpenCV/Headers/nonfree/features2d.hpp create mode 100644 OpenCV/Headers/nonfree/nonfree.hpp create mode 100644 OpenCV/Headers/objdetect/objdetect.hpp create mode 100644 OpenCV/Headers/opencv.hpp create mode 100644 OpenCV/Headers/opencv_modules.hpp create mode 100644 OpenCV/Headers/photo/photo.hpp create mode 100644 OpenCV/Headers/photo/photo_c.h create mode 100644 OpenCV/Headers/stitching/detail/autocalib.hpp create mode 100644 OpenCV/Headers/stitching/detail/blenders.hpp create mode 100644 OpenCV/Headers/stitching/detail/camera.hpp create mode 100644 OpenCV/Headers/stitching/detail/exposure_compensate.hpp create mode 100644 OpenCV/Headers/stitching/detail/matchers.hpp create mode 100644 OpenCV/Headers/stitching/detail/motion_estimators.hpp create mode 100644 OpenCV/Headers/stitching/detail/seam_finders.hpp create mode 100644 OpenCV/Headers/stitching/detail/util.hpp create mode 100644 OpenCV/Headers/stitching/detail/util_inl.hpp create mode 100644 OpenCV/Headers/stitching/detail/warpers.hpp create mode 100644 OpenCV/Headers/stitching/detail/warpers_inl.hpp create mode 100644 OpenCV/Headers/stitching/stitcher.hpp create mode 100644 OpenCV/Headers/stitching/warpers.hpp create mode 100644 OpenCV/Headers/ts/ts.hpp create mode 100644 OpenCV/Headers/ts/ts_gtest.h create mode 100644 OpenCV/Headers/ts/ts_perf.hpp create mode 100644 OpenCV/Headers/video/background_segm.hpp create mode 100644 OpenCV/Headers/video/tracking.hpp create mode 100644 OpenCV/Headers/video/video.hpp create mode 100644 OpenCV/Headers/videostab/deblurring.hpp create mode 100644 OpenCV/Headers/videostab/fast_marching.hpp create mode 100644 OpenCV/Headers/videostab/fast_marching_inl.hpp create mode 100644 OpenCV/Headers/videostab/frame_source.hpp create mode 100644 OpenCV/Headers/videostab/global_motion.hpp create mode 100644 OpenCV/Headers/videostab/inpainting.hpp create mode 100644 
OpenCV/Headers/videostab/log.hpp create mode 100644 OpenCV/Headers/videostab/motion_stabilizing.hpp create mode 100644 OpenCV/Headers/videostab/optical_flow.hpp create mode 100644 OpenCV/Headers/videostab/stabilizer.hpp create mode 100644 OpenCV/Headers/videostab/videostab.hpp create mode 100644 OpenCV/Libraries/libopencv_calib3d.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_contrib.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_core.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_features2d.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_flann.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_gpu.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_highgui.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_imgproc.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_legacy.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_ml.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_nonfree.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_objdetect.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_photo.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_stitching.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_ts.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_video.2.4.3.dylib create mode 100644 OpenCV/Libraries/libopencv_videostab.2.4.3.dylib create mode 100644 marker_acquisition_view.cpp create mode 100644 marker_acquisition_view.h create mode 100644 markers.cpp create mode 100644 markers.h diff --git a/CVBlob.framework/CVBlob b/CVBlob.framework/CVBlob new file mode 120000 index 0000000000..8afbdd6612 --- /dev/null +++ b/CVBlob.framework/CVBlob @@ -0,0 +1 @@ +Versions/A/CVBlob \ No newline at end of file diff --git a/CVBlob.framework/Headers b/CVBlob.framework/Headers new file mode 120000 index 0000000000..0550d8afc8 --- /dev/null +++ b/CVBlob.framework/Headers @@ -0,0 +1 @@ +Versions/A/Headers/ \ No newline at end of file diff --git 
a/CVBlob.framework/Versions/A/CVBlob b/CVBlob.framework/Versions/A/CVBlob new file mode 100644 index 0000000000000000000000000000000000000000..bef77a12cb1e05951288f62962c4aecf344bb254 GIT binary patch literal 278296 zcmeEv3w)H-vG^-)kS)Anon{ zUiSx?J!j6$nKNh3ocGyt7WiX|M=%y2iuUo!mGDU@yrmvb)5bt{KAIS3!*a*vxt;cFRwis1o4@v`2XPWM>+hj48OtfAj7{goHmB|U&wF)!=((r&3b9&@JAWm#Cl6{__GX0as1;P zKEUul8UBvpI}EG1eQaX*HHL3-zJKHLoLfY4wljQ);p+@na=U2adio59-KPQUxYhv^ zW)l3DJ5w*mbJ|CSOSKt+@nWVYrXskItg_R~det z-S z!>tT`obED)4|4qX7{18xj|_{>A^sOKtYg^B@Oxa|Sxx6!3k00lB|ITm^!i5~-@P3E4Z{iN6Q2tiUd6DK)1S`pMviY{eMVT1H5?wz>5g7N{I+xYJq-Vgp@Z@7 za=ZB%ho5A45yQibZ{Tn}2>Rr@M~B3pxEI z+%C`N^3CP=F&tjR>9=yaX^j8T8C3oO4*!C~quKsnW_Leum*s7*49E^eV$nhF@TKfZ=h57hOf^S25hi@Ck;m zG915z(p|`~mElf?=Qa?2JHzQ-3g5}FU@3+D3~yz4km1=^6Mh}Te`EM3hRYfWe>=lL zhSNVn@fS0^f#EKO1{}{vHGOW9n z;x{n-Jj0(coUn@URSdt$@U-hF{#u6jFf6*B;lxn2@SFhAZDzQg z;TIY1VfZ4$-!U8?B)T&hmM~Npu4cG_;e8BuGJK5Tiwuu292FwI=P*1N0hsyQ(QrP) zLv0lPD8px3C|twg2RM8e!*^Q=zm~&KbNFtCA7vO|Si^7?r=P~*pWj4$F5&p?9A3R5`Na=PkT+gtQp@ZSt5T)x26Lc}(7dZYa9ItZtE)HMG@V#|J_g#h&h8J@B^Vbvp z-FAW>V*G!urSMlc-G4Lw2Mps3%NaTtz7Qq4TbTay9F8zNljG|+d@jScxZXM$maGTR zv{jXX4ZiR#p>P-jRe=qS(QqWLhkUK!0OP6y8+-wOt3MVD27{P*@rqSTs{$KZTK$no zWkYoB+EA=I9O;PrBY}{zL8)vNQTCKo{>Y|qTYIagWNj#}wMSdSflc*GeI;5=sI5J| zN%KcGDT>mFF`kMw{*DmFa605l6nS#hJXJzG&EL8<8VkqQwQ2E_$i1|LxCcU!cqrDP zg(Knk$;+T|N#GESuV{^~sf|YB(axBsba5zN zyUrhu)JB6LWdj=J(h8BnQ?fV|S6W-c?Hx8mS#4{`A7c(3R-!WG7maKPZB#p3TdlbA z#i95W(auOP99ersbfXnnp=^wYVv)vBEZi1~hhj=&jaJ{@THoeh8(Oo*ELZiSV6Yyo zHR5k&A(UlHtmU$#$8refMtXBW%NNB$ert?3xI+Ggbyew$2kSx` zvXE5OcPQu>EO}H5k;2jJ1+Bg^+}etDWoBDl%P!24fAv+-aDa8V$(L5v3I}UqT$YZ8(x2+X^v~ ztyqI6Z{$RhYC5v+3QskOL&t=<9V>uhP{m$i`j`Cr5P<+UF(pF;Ao z7N&kScS1|wIZRTkSIJd^icrko5z1EK-0)AFL4M{^8IE*>V)0?CDT`S)X};=wjH|Mv z9T&o~6*kWbLkoSsO2a`dl{u?0c{Z4u_RfxV+8Tf0ro7u2b@_S^lB;rSJ>2+=L2Yi! 
z!;w)o2S;s9G_`!$nZ^>i#H7hE4Xi|$h%H~NU}l%bHZ@kTC#or@4(Tbry>|` zX$i$bkpQ^`_JY26rrrn}j4$q~(F`9P@W(@Iqp?jVL6j<8KFJg-ou-h!c=g}E&{Vt# zs#OjmGtIkwrJAxv4vq=(cr0>rTjK`M0vl>uqmhs=v>rKD1u5vSx88Ze1<5;%hyMTw!hWl-6$WhT44bXw1KsMqg=(<=Sos^yAI&*4$xz_CY0vmYkUa!dyykXjw;s@UF|C~20tG&8C6!Qn8F|kdjw7T)Qq3>gcfLo1VbPI0H`4IUQ%ux>{zfv_>Oq{kY&| z%R55~BlAp1J@*XAtl4}08&~@A@3q;oH&SHC-h@og-azKc{#t+B8;-2Zkv*r#oV`S5 z%$||ivkynG^x;VM((5TQWp5xeWUnK0Wseg2H)bz=SfUvAR|^+KqjnBgi?MBGa`e7v||k4l>x(UB@Lsu z_zW0sjEu?DNC=h1EE%cmG0hr90$-+@@@1?kQ>>|`e3@&?6m704Q?$9Jd|7JB6mO|1 zU$&a^WvwX_##U2SjJc-F7;8;gG3J``Wv?k;uA1@ zpA#Utw)ReFd;A06^MUXAzvg>rYPc-a(b*dJl!)sIxO%E@=$P@9SB6{he!()I7H+f+ zsKRtDXsLyuYEdvqXY0v%;h@=;0v96jpgtYzvQXoRE8`}E&!h2{mRTh8>3chZ_jWAF zOY>l9I)85`&cITiZCcXm^Gj>2z9wxJO5fx`<(Y8)L!(cVy3?1tX&P%3!-g;<*5{$}-iJ*sX?TNY8p=>lI!Z1Hq@(O= z(r3C^)>1MgFl0(6HiXK6)1^ws*~Q8<%Qf=Ngw?rc8M5otS?~sZnOtJBT}HCjRn}xi zM%ftYO3I!>sw!&=o5Hf+Nnn3vzX(z~>_resIHDULlE`vdrW8**@uBbG1tq6dM#tIm zN=~k9_@uhe%APc6(^GJ`>oUQS=_hDCy_`!B-DTxUl&MHnnih%1+DxZl>l>GF#rWct z8$toR809O~@X&iH-V30vC;cMM$T{hUd`&`Uj zgI-R6d$s1f!p=Ps8qOu(xEg4#Kgv)VwpeO5mI8Y zXpDV{ErACQmUOn6r(3oZc*We;)IhT{#V%fkO!JwcS@W<>z^usXx@aC#J}V6#-CNq( zwk8x)(k6)+ByMlT#zeK8F>+&8W+1&bG9SrWURxP zq|eAEgQbs1j12U7XP&d^jzo%_jPe#ILs3Q|Lhjn7(8msUleyGF%)`QAh~#kAmX&GIbcr$2v?a1#+s(qta_cBNv+97q z-5&_+4?N^ZnSKpCOV;6y3e7)E%vdd%3&_YU1!HWs0x>#6L8{h@i)^v_pn~(xB|$k# z_xYtxZFg!c@icxj$BA_*dH=>pt518)adM$a%o5CtWHLf$FDF;zf(6>lF=)(|hP70@ zlWV+mAR5Q8G`jg3w~FOWXS`RJt6}I^BsF9nj;u`cYL@36JUpu(mNLhi0>jZ#<>gtj z+EQ_SD$mqLUE52}wwYJG$g~!vlHuq#Fb$-*(VUSadp*eHcK!2eL00KUqIJ%Lv^|X6 ztmR=+nYCM{X@O|_CSEA0Z&*{J;D4oB#v9|^a!pa1N;JA4TET5Ao(~6f6Oy)ElXKZj zv>tozrpZ(0;e>SMy6f~+i1myLT+dJ%1th5zeRgwla?a2RD!A@Ixz*~koou;z%vye* zaXB&WU#naF8of7D8k@AnC8*eHycjJzfQmKY zh(ESTYw3)Lo96Yw;2Law+y%u;OKrKQH84+!OrZ_i#wsakJ|!r});x^#BW4$rYA6$z z!+Cwshm7!ox{ymL7hmSct~-gmKYS1IPC1u z3Ty1O$lNnpALFuEogC_j=jYOVh6S5e9|pE<*^;37%>b!APLV;cj3Iqy<80ESX&R(& z^7-P`}^XWx2!3H_>2MdyrmlJjkvW(+qHEefBUdrEz^i1 z=rb!=yhu`mW`;SD&U|s)&k@f(oNz9!>Dl0#?;^~jiP|v@Ui`ruJ3He4fQLQ$*3 
z`A}I&x^ZLI?8m=23X?h$YKq4?jYnLlNF&K9Qw<6JRrv0MM;ng66jjiaWa6G%(-|qU_=p-g_~pSlxiYDSOs=Q2#9G`m z@9NReXsn(x8xeZe^u(o_X3c|~?68e7kLt74Ailomp=a4>u5e=cX0A->;mJ1h%E_d8 zqZbi=KTmqBW|^<(3#@eInKj$F@nq=M+%z(}r1z73W))jneuiT<-%EWSTze@KCrD(+pf7>=PEID7`+z*<&t$|T zbZXXXPMN9W;3W3u!sSGhH0a3| zePza)g)g2yj>mC!`lwt_hxXWnP0HFARcm$hEXhf4PoO7HUrr1B>(|8fg@ZSB!WC=ze*uL-rLWwmaZ(P2o>$unU=U!&*aUF2JmGq2K8OnZgWaP>8^)Fc`e z+49RWmGPKA5Q>?kHWzzM$GTWI9VH-JLODPCLuby94F}U0IZwb{gsZWImE(Y&lC| z9B4~BOd;v=J5(lj0c=!RPDt5O*>z(&mPDHsaxFj85hnW?6C3Zmj<3Ox)`6e* ze;nlRx$XYXy{pdmX);yS-)Cz(;|`lG`;qNp$~~?T?Fp4>mJJ2jo_Q{oaXH2_q9y!{ z3(K;cnWp?Fvb>ya@z<1*nD~nWP#Km+(^koh9lFe-W>icb>lvm=DKkEiSuafj;5^TV zJ*CCQ$5U2OQCd-1T~%3JTv6gFE-o%DL9C~wq_VucxVWOcth!nxzhT0f*65n~Q4pUO z{`&j(#sN4+1Aqen5Ed8UAHtso0J||V%jZ{dLM%iY0O;rNH~{f~B1MCy#X}nzhaXLA zQdTxG()4-gQVJkHA;u#8M!k1}2FVv=v;tpH;&E=@ixZy@S#)gs8y&JXH{(PvH} zxKJg2J8uQp3;+|;__d0M(_5=4y`A5VRTN)D0Q#G$F6D=-NB?MA&>#1k<*QrNv`9z8 zF6P(BQG+&qvR>r}w}m1dMg`mDgX<`zly9L%<<0idv^5yV*a)e#n|QB$^5Ek6839oNhp3w7n%U4F@2xX zbzDzVWcog(YmnpjcNIARFh`_*6SI8NHKnVV*{S=5=!o0jbqFZsz`CTeFQL4qCY3%l zp&YUm_>h`V`jX0P31#2F8jGNC%y&NWq}*OaWBB&b`DgX`Yj*$&rtdlSlFQW;DrZzYwtT;J_g zx9%%i{dCKG*V(s;a$C5hgmcvyiKLXzmZQ6{;}DgPmXYWjRFmH0>h}%ncu`Hfkmzn5 z8XBU4rM$aMW}qgOL7lObG?m(j)gp`~DM%Af3F7_T?L$M@4B$wD>ZvQaaNgGA4%dBD zDlB2VObVQuj7h)$_$V~Z3KDX^nt1dE!TotPnZj=*R#Mr%4SKq7CVSYf?vb8F*tfDB z6R1h=4z;Gw^{L+3jvnQWZOR)9AbyIP>PB$|ftuLQ_N^wB-aKS`!Y}ZmrnQlEk z6nA4AO7;{w021EbWX~k=WnZ!f+i}9%m+YF3Uz7t!CXObTkg>t331zpMRCc4ObcU#X zBTr!-qO~JqBZSfQOkMMYF?)|EwxA>wSE9$EJS16W zS@w%2f{c&cOG&9!6y|Id6YZ9lrB$dYQH$t45)RP{tgb&((|CEC@^bd3LB@lABALMQ z*7SYcMP(Lss~j2_Pf5_jVx4;rB}BC+yW^sHA|WcpAQZNsyn=+7B%!=Qgs&uonV54# z9oF=1<{X9XDE&r`{Y2QG5N2i05v$%HBwKsU$Pqh*<4Nypi5^6e+Y$`~T_0M2Xb>2v z>2uwE4YmO!5E2-=nA?Zn6I)O&!Xr1rV<9D;V)JVcNu@WT92Wi2QS406^B+ivzUi=< z^bV?t=hQ9xu+go4+S1SOSFqy~a@Z=J{SWph=y`jT*S9IJFM#+}C^MQ$hE70DCXXM zNP6egCw}csTvgqWTwUxV9R#=}neUTWp zED}AQn%KNsP5eboJcHVPsxI-Yk~r9q^zJ4Zw(LWJuUpj2dfsxhA1hbyIF;kNIH^>T 
zaLW!g@ey>v$H}{O_r}W&v%${pL!_7E5I+OICE?QT0MO|a(aD7?g_{#J>K?32{OIE= z`xxn%Yf5+V(9qD4^9$0=Pc)Sm63?(cyGw?KhLYlMqI-_`UXD$#d!Dd&5hXmulcJIz ze*`TayT@cVe&rrrO>`GhdI6+ii2|cH5Hio*n@IKQ4F;Qm9=%WWCEe9SLqo@0MPvSS zH9(8&jc3)wek{w*_eAu41gE5Syf-w|;+pf^F;~%p_??Q9>c*}X$-iqQzn{qoPDyPy zk>htF&o;7ClaEr-9!J?5lA?RnZ7Zb?4Gnn{SCup*Qz(5y^1+27X4M>T;;P~X>O0Za z8j`7Xl(wiLDMkei$p_I{dlRb)y@?5m=h;PK0I=u>IOnmugRlk&K!okm%n1-q6sIv(QFU%Ka^_H+osGU8I;4!}}OMh>&%v>^7y} z$mtp0$M8XfvJNmW(J^+n>#Q3jjhY5F^m=B(EvC3I7+5B0MS+Sv#UC;W>%=OT}>%HYSO!1O?Z3MREmoEg34@q z_V#%1*yg>X>uqp#{|ici17h@^$$MxlAljW!%+J-tb6kPkXuN}J>b}EBm3p*K6yXIN z-imKOSCjYAH}a{+Tt(d?shCgE{0QR&9mk2eRLb5IjR87M#f00Ct^!%0%Q)tmq8vWv zDpC%sTlS$_)BBQQi-QyWjXn#M95G|$(2pGYO}w`5D>lZ3T8#pRCcKeWyioW&Ol8l+lAUC&pVmtojT7V z@6Kbc>-&zmrg(RzFIaI)!CLxu*y@WjqRV(#muBSS}CtYH~E6 zcs{ZG-h`LT*jX)ut)sHFGa z#B<0@tbvH4SU4g|ER~3?yNSLx^p)RJBNlgFPDP_Q7$0e}GTt%W6vkNEaQ9g?x)KG9xh*}d8 z=&p}*k?KT0WS+$vs!_Hlo5lH zn5$@c-!a$qIHp*Rq171r=`q)}&mD8El&*J*f|^zi9doVfm1c=ei`|_OiR}v`k(0L% zIpCLG`Z5LEe37yFWarLgB~DUsSgRTn z{~ba-sT58xO$%lF#V8X$`@5&hMc~)RphiS5Bi34TuXv$vwX6f<>c1-1f$gY?71V$ThSW@FTap%Y7EXD5)W<)c#?W=!PHH}bd6eLvQT(svFaFlO|3npVTiGaF z4#hd**5mN;tLQ74(A+-Za^~*Eg~SJ+y6q>fy1mKNY+Ovb4^4OOi^8MKeQxX9u+a73 zGYfC~v#ahoM_)qt!=(7T_4p{)UC2gFiNz>dLl~4(ce5`P^9-E$T#oBdiSDCfSz=0e zzn)mE5B4RLUa>;DPq?C_7neO$T)094W73yGI_$V8k10LuEzPr1{H`V+)x|M9x#7^% zq*%Yo!-K80d$2=ds*gKa_)U5+>`+x&D#AS@m!X+VDZYQ4yOBILkJ1D>;b1b2d6h zdtD#ky2iCQN}Ny*C6z-7To-;Np}fR(&3n{IL13zDq#LHLO&zms%-y_~O+LWyl=R^i=FCmHQ70+CSG=Z_u0gAxVuZEp9~`< zu{YSGyt++!6}{|!c!IsJ8cUp z`?q=biv^WVUQiKLKxDm~EY0zoj6H4zuxmX!P7p(9`3X@CA*lkz_Z4=S9QC8OdY6=%-y!~pzdq_>8 zf43~izP!!*a@X6?c}I3T7R5FWIq2rEZr#_YuY6K@X~ja6R~+^b`(wCagL^c%Twz98 z8x1Z98rm9wfqKy|;a20pEDO(e61ns!8uo!BxY2G>y`==QX|h09>br!PSEdszHUi8G zci;{t@g&QEft8ez&kL@2L{~M;Ca^IY1W|YNC$37jlw^R@VpDI-|FX#WmU6YjCiv*JO#W%M$O&5`Wf_ z8R+2lzB@E@ zrbuTHY4G43#65r9Y{Y$Wf<;?+AD{R0=}dq;{*LK+H=lR)i?;BFzT5{HOo~oFk=p&v z&`?sGRZVn1_KsMlIi3`;?1)krn-C)dArJRma$tk=ph;$gnA7QryvOO#DW54Ev>z;2 
z6W!b2(HC^Od)~$MB>bD`?h@bc6yI-qSF9Ujl$uKQqMTyJrzY<+$i=wSJfe^(_3aG2 zv(Qb{)C2P&yF#%&q6~zR-49@Cl`pEv?j0f*<$fE97_r^2CImjId@&(NjyyHSG^nC2 zlR-6EUuc?{ic>*}MKmU~&J#`2WJKuaG`m0j?$FRS@pl2ZZaWK)`K7vdqFyAERQE1Z zP>4I2nQNlE=bfP;p_Kbcvw|{pAM;a_k75EfabK4J&#B22ev@PT3=;vt$EI+OV=5fE zl9C@Wl)O_-*^=RMp|0ddSjm?cNG00~X>i?VaNV7jOdRBvRkx41zBbz6YS*MFH`h+8 zd#5>w=e#mj79D_7lj0nln&=ix(sU)cCdA=7JhCSy2O^InhtU(TnqD#r)Mu}2agBv7 z@g*)!wLCnZ1T~eS5@0*qC~MA?HEwSu#ovTDW|8a;iX))c(_j@3Td1iN9-q|Po59AW z51G!|+K5)yphcm4p+y`(?v4(jVdLKg(5Z^eZK;9R3=JGYLlNRgd!qFb*1>D6gYr?P zDz#J^9?ToBrtTv_UdGB2_|!UGJ$O(aZQm4=qC^fZv)J^Yvrws^PG{Xw$9l zrZg71deo%Sqb8KDT!w^)RMk|sI1ic>$5+*aI0tI3Svt_$v$rR8$9qFV+r(e7F8XI| z)M$9}jK4gMy=5Qvop?Qoyc{_@9Bg6BJn}5g#51G5lO6S7cGMkNQ3LVJ84dWe#4pVf ze@T}3IY;LFb!g~_AOCz63-{9BhK7!8!av`}Kcg}3Q#ffmAhbmaVe67>Q?zQVua=to z7~Qljw|Z>zWb2hFadXam2T1JDc7Axkiz^A@XgeNY?6q965zC0mF0rKd@R3(hp;)uV zB{$o;?EnmXLa@V&6v!_7*+tPs;1c03v61rdk#0HH77UIugX{}Lf&nh)?@B616UxH_ ztA($pm3NFr4!Cgm@Ia|GIL8`1%Nm@1g+ z8n!98^KMyB<7V3bmX_`AWP}2UMF_$CePbjg(NZJ0Lyrtf%WTyB9 zpFqH;8ek>Mb|=1}aRUG>k;$ZNlKv8jz%Mg!Jscxim>w6%9g$4*n?`qH!sU`2zf1*y z19+ZbngihD0Pq?HzUUy#I~c%gbXdCr^Ggm)9}VDGI*KZ39tJ+cfjSI)VH5zau#1nB z|9?7vslOAo+qt7J!16S3vBVd&!f#>(zN~;+7oVZeD5DoG27t4$QqU&x#Sc{CsTkM> z0Lx_BR{&rV0Gy9Di9?e$5+n8Y>n&X{`oEA6E|5n*@C(*mJwB0AGYUXlK*vvH6y+4e z2U#|c0>G=6;xlVBVLpL0myZUx5&-b*Cfq-oxIK!2hdG(R{FDPfMsA}DObq67;5Ue= zE&#xt8}Yes3~_rGxqW4f$>QD_LkwmV0yrj)1!$J#k!7X!hip(d@xd>sx?1Fqy2d86 z3^i)TiGTkY=+y)+!Qu|_6uh7_WjP-6SuM_7Vca-8hK2Eq3-KEw7Z>3-ypOhc zrhGR5Rlbn)Z7d!S;>BY=lq8|Jxy)xl_%Q>qHekKdEYB8kilkbY@E%SsoFp~cFXlv~ z=|ORb1K$)GwDGmhDRbt)b3R+X8DiypRh-`*#AT0~KKY`6sP)4_#-pz9_S&RLg1SdO z07TDcSZ;BjXzD~K7M+!7y;FxW@&YNoQGl~iTQ$&3k1h|M1TH709WMFS46m~o=cE#w z^G-(vF)fng*F}Z)x=k|(=`DoS$$FNq!6J!Nm07#EAY`jhUArSrFLSq8nf^u%or#u@;!lKy-w(!D$?dlXq5V&Odh>V>8}78wVT@t zhr<@Dc&XI%@o4&)WY{9j36RdCgd4 z*&^3@N_3I;D+jS-8#qTz8HeyPzI7uKq|ngxrD?pi{mX6MU*aLbCo}mQb?d$o>?kb# zpMH)Ot;Lk%lp;DZ+lKLWhy%$z4cj6rW!AE$&wpyXvpi7 zR4GQ|rM4hW$m&ItZ%kRA+%PO1+lRqHv?9Zw+w4xZ3Mr9cd#3&neO67M(c~@9Qb|v? 
z)l_qM-b`$s-~}rDm>an)H0~>&E7tat-oxLgOVCCc={k1*0PdyX#q55nFQb%+PFX5x ze^eH_ns1yIr6Nh|vieioL0I9G#Di4gT-RhAQK|{G(4-aVzhqvzA2PKlVXSJh(`aGb z)J#?^Y(Arpx5*?bL?55Ywy|FkEUm8B(9P~kzBLX1IZSq7>kc??qqR|;>4#l~95sEe zPa5x3@jEj7a?luAs6lm)Xs4(w*Ejd-rX=fG_Vc2>$$Bt6x$%mZxJW|QV%UMrkj?%{ zp5(CGFs+0iv<`64hEvNAsj1FG;(Xroq7@~U_Zr1WZthi+oqh7)9)=|{ZL~tq+*nJ4 za!4&Ev12or0vj@#X1~=m?R{H%S!BH-1Ka}WCpT{JHhFBbbtaY{9+<;47Fk5Y#2J)O z^<0x1IS!rATP)LJ9RqXJlz3~P z>o=aUF#M3MOhSCaTN(OGZ`t413u(w<^#;a?WZV%{4jthq4dhD@hd7=(9rJjZ%&A{= zr)Wxi>OF;w~AgF*h37cxUwzf9uH9;DqEU7(=3{mZfimubfb7A*N7f~0guo&?w6s}Tl!pm zi>yzSZQp>KDj~xok1p8()0>Z@1|Aytsafr^B*vbi`TVt2)@PJni@XcBp#yq-VNX?> zp!77%)a!cb3M0yOCHubOppnj;7WprrP_2IW^iJ-^3eScH1{Rm~g+Kro4wNE=x#Z z9ysdb4a2y3tSGf(vB80}b#pUJO;-~>ik8_KzOc>v0y;w!8k@PCEYpBbSe?wz{5p@R zHrV$nJh~8WhrXk;ePf&Vjjp#L?y@f{nMWQ5XTw7KMc*Z-3eW4kp_$IG59;sqv3$>7 zJeF|Z-mX77x_;&8kr%k)EvzJSd!&N1HAuUn*Hm?W(qFKGXbU{K{1Eli`_)9}ZgD$Y zKHnxhBI%8vo)f$F%62($G72iyCtqyY++&#sVKYrF->IfBhnLkvXP26I2Cu;Zob8bKI;K$`ACmb9sP!% z?z47?mjA#&9W^caz>s(^D5>ng#{YoWdKWkMDLCDV9e1P&H>y54@R+z|$&!g4WQ+0I zryQQ3cg1sXH8>2}+BB0`FJK;z4^zyPHKHq9sw|+@V=^0q6ri7yL zC=C@*KE1?`po!Nn2_ug6;9IZkWFE)oP5Jv~NsU)My!V@`$!fOPEa!gK_F&l#(+3v-ofM_v0)Z}urC?;2Ma9}PsoBP$| z^6hG}lcaeRpX{MW5%kA{5kvLOD`=(7;Tvo zSmzH%YNNr>TqK!ua}3{mA9?!rc&vEArNuh^n$~FGCaulCvDlp{r^TW8717Q}FdSKX zMRX(NA*VkO@AS7eM#GW#TmWTbJQRxcmOiu5(rLkcdI5rlI`wTwdR8%x+Tp_;sj@#n10YKh) ze~5?ig|G94vI2~77TrFo0H2wkEZ+PFqQebDNr%6efFGR2i%$-^drfp=TRu;^11Cs_ zqDVPMlHURoo!mmrr+je}-G!`484p*&g)HdBa!o76WvJUJYb?5 zNqZbnz3Ud)P!Lf08F3Kk=H0ZX|j+bNq?vrRfCe?mR)deI`0_9xhM+ z`HqS1+7Yye@(G4s_-A?Ys)tq+-AL^0izd2})ZZ&6x{>(L=@X6ejl>T&nCRr;&V20i zHWQt51oiNkiB3$8@~ro_Omy<(U_Nrrn`G!k9`?(JE@+|~iJT9Z=tk1tzGR}CHv)Z5 znrxJhfA%V`^3|E>Mk43OOmsdzHkgk-A2HG0%0KRv58Y7{-52xJ3-(*HorYfa4M$gi zEp%(aT`U*Qr-CN>*Z6theB^z=L{~ptzKqBGUNX_)Z+zh=pZq4djBxoM)~j+Rs7_WUxSHG9+b|9?lu#h>^JhEd+Y@1-ZIgRWIc1<3_~x^jX*C! 
z6P-`eG0e3N`hba!f0i~c`+Uhn$3It^7u}>&jPmi%spdsjXQKPk2+H>{6WvuKux-0L|USOD+_fV*}9 zz-Iv5RSN(nIlw)60r7q465{*GONj4H9RA29#COIe#P=^36W?E4OyM70Onkp~G4cKE z#l*MsV&XgHV&ePP8shuY8Vc{JA-;Fj5Wm$m#IK6s+#2FLzJ~a|TTOg_T1|YT)x>9I zHSzUu_|$6Rd$fx9{-BEZ-pBBcD&o7giuhhvMSML~#P`1{0PX>Bf3pJMQvmL56#&ly zxSJ~go&<1LR{)H5fcyLkfE0jxR0Ti+!2M=9$@9~4lIQMn!gp~vS`OfJfO~a0rC(YO zFwp^SSGl-t=pM)EUo0be?=2ITliV$30G|eMd&>YG2XJ3j2CyH%{b5d5Tt@jq8Rh$n zQi9KwQv6p-NgrDo#!3PHg8WJW{s`c9l>*!c;C{i8@jE;KzXWi9!b9Z_ zd5Ev#p?sz?ba<$I|6M}m`fLf6Ykdj8YzMeQB>)#Xz}-*+cDB9ix< zi%8!8xRCOD_(GDmki&nOOZmSvm-7GNT*`m@T+084bBO;01(# zmf`LTDF07gK>0UZK>43_0p)+o`4kSHPx)VYKILC^KIQK?pYs3whbaH#hlu|whBX|2 z_J_DV%qIBkY^sNE%%*y{Z#MC%oK1Yro=trIa2~-QpGWcEIgjYO7_L5#_*`)w@tJ)d zz&HoEC!R<4@|$zTrBe4Z=K}mEfctajQv12*TuPTXm)gNi=aQW-JD2Rc{9M30w(g&v zL-dcFL-co_L-gy<0T}N9claElUvdu7yE%UPImG8j#YF#bG11>qO!VPmfUyp6`-_Rb zwwUNY#PO4giT>MX11th?&pKP&4|czO7Qk5!a6f+*z+5cPSpah!;J)B2fYTh{cAh1! z7|t4XmiWE-S$~@aFd1N0-z;%2)BWXHlrA+3zzHyG%PdNF3#Y4_1@K`9xIMEdzsa*G zzh9q8`8|3j<#*+20L2b)zc!Qj{b;7R`#I~$nZ$4ROv-ojOyU=q3Gg8YxJzdO-ez_m zI#paJbw73rz!-oT*tgGefV=4wfKmszcg!I8&FKVxJdI!t!``XlTCw}{3~yt&fZZdTvTtBE~>YgE`SLRaN|8r^tZouQoZeS zQv7F}RBt{fKs|ul;RLwI0q#Fd2AJyr_Y;!=&US!1F&UBIZkkN_luo96CQqh({yGWZ z0tdJ^O(H$5pG5gxHwoY(0QW@IWv@0&>JKRl7rpE8ls{dNNB_vHx`{tAb0oB%K% z^Pd24o&(&ICWzlYci%J~;Bo->!tuoKl|rtkLVybZ+&2~ioa+F$%JEYes$j*TIE z_{A8qhaZn2dw6DyxHUSfX$;l>GX=zVWdYgK`~tvByO$EUHyV1nw#nAv00A~Gg zH1T<2G{9JZS@(=4d)~6xUhD0L;Rx^HUw*t_8|(8vT~Nd&?;R^H8o+ zs9nB21K>9R?x$t|yb9p{_Za|p1Grbupm9Xi46?J?GXP&IaGx;)U@!K&Gsyqr?R&K6 z-%h9TLf>>6ckG%@{r>&a$v(TLlf5-gr}B=OPUZc>H1f+oVffu?RNn2=0A2@hZ{qMx z(*WKCa4XXQ9t05W`C9<)p{W2b0=N%OCH}joQhpCl1^5MkyK5@IzXG^dO$C_Z0Qb35 z0cJSBefm^@(*b6UoeD4n;Qs3r5d40}@XJ%E+}Q4ah2zdC07n4aB~t)?0N|c5h05`Z zB9iyPy`SKaNk)3&<)_mwXGrtxGygvx`{$5!1xcLBT%;Qp2i z;Cle>ZWpEdh>Ox)%<0Z_0sIWWJ%-ca+T|Vq_Ya(uew!2EYXI&RhK){uM*-ZWPJr)V zJK?aC(;t{j^6#2V@;}JoyC;+UpO{SYpEDWYW&rmeCW#yT?!HN6r~f*M?Bw=IWFMO+ zQU4d2MD}nVhu@qio~yQCtRhFP}(q;GW2O9H&mCdz33 z;sk(A0Pf@jfSG9b69A?=zacz`bfxIZ->;2{9_*6{$J18^@J 
z4=@^Fmdfzb@c>@{a8DaAuARG|Ed=m5!2JchpYH(o!b0&lg&XhqzYE}gW*ph|H^z~j z<4Xc>1Gra?1Na--#W>Q(ug8);aPRpA0C&$=fFGkD91HL?fcw(1WXGPd0Cxbm(K&qr z`-L$8UjcCM8$;nc#*n=&W%!o@fFA<5pJ4dg(EuF)?%PHKbfTSc7^k-m0Ox5l#p55% z2#W!ng&am00C4{5RPk^_Gs3e0oCi6K@HznJL#K-87@85D1>n4&!wCHV&h@7PtO968 zh(m)LIE?UG0B6mq;!%iZgl7Uc=WrNd8-R206tSeyjBvCAoV#((4bY76F96PaP67BW zKr_NK0GwSMM)(^5r+SKbNv;{;>v*v26o8)qG$VWwz3g z6~MV}2EeZYni2jAz`1A!!0!N>5uOI%Ea5Q1BLL3vGXQP?Xht{_zTs5G$TY8i~bVB2;T&7&Ycc$ z44@g|bO7f}4kP>{fb);j09pZ>5l#be_H!8FO#se4(*SM+Xht{{z`2vd2*Uu*_Gtir z253h3Cje*tH1R8(%?PIeIIB5~a1RazrvdKyG$XtOz}YtypdX+a;V%K4pPLHsU4UkU zMF7t297gykfOF+kfL#F12)_y7ES?Im5uh32%>d5BQvlilnh{0;oV%s~90F)Y_%eWV z;}n3O12iN28Gv))6o5K_W`wn9?^6K&0MLx^_h{cm07n6u5e}d|7XiEk(2Ve>Xum}O z-vDSv_;s|`B7j=}nh|b7dvyV*0L=&&12}iP#Je@k2(Ll=bcu&enh|~k?b9V52Wm#R z2<_7aa537a3!nz=(Y<3i^gCqNU*;{@=bJd*(~ zLwP0xT#E8c23UygXR;WNIlCqUtN>_6xE$q~EY4>(BOHVBOcrklH6t8_@=OA_4cp5k z;a8nI@L?7QG$R~Bc_sl2qCAttc*I#U3GhkTW`y6v_An9Ps{qXiA42<|2=GOKW`tir zd!GpKF@R=-A4U6~2oOX2Ccp2TIT7&vo@RvqiuOAJ;PU{@2tSARIzc?r(~NL4+Uo>> zZ==0V5dF8adIG?|05l`K5AAaTz)Es0_;P3EEIm+xvLQ1cC^Pr(O)??76N=3?XghwKThmF z9!7gC6#cTZun=G;+GC;c^Uj0g0J_i~#|eM$ynh_vdpOMq{}b(T9Ke60J&qIO8|R#H z0FPn)j}!fZb8sx+`PXKIpThbd3vfTy|5(vaIlIOJJcIQ=R`eTA9B*$2XhwK9*8f<5 zr_dh9igBIu&=|mTvCRm7g!VWF;NQ_-juHKpbKMw#9<2W{qTh0si~)EA>wk>scb&%z z#B)2%2tSMUUm#w7X-3$I^tj7{@vr3jpF+{{^BSbIvRPeE+E#VG`?qG{AFM z|D(k^mUHK5fL?%Rgim7qj|R94>wmNuZ#eOF)oob+qs6$?iJko?vHnMke%Xn?bNU3< z|0prva&8|5(1-OuN~}vcSB?_Tbu}Zr8vW5I@eWTjLIwSi1K?Jye}`Bfa_({fRAc=+ z#Q5L2(E%{Z0nG><4shZRP0hplcZhkE6Q4r55bNI|#@|l7e0x6DKZtph6Ynt2!TQIi zjnZqkN z{0R>CaQF@m_j34~96re5?{Qch5(j{z94^8`;{fm$hZl1A9Sn~GILhUB;riYvfJv88 z`g1uvkHZxl?%}Y?;nz5P9fxOMPV^BD2RXb&rswdz63;g!zQW-za`^ig9uLsP@;=4* zgN*+X-p4xh>OdohM50_^1c>Nt#p4*d8zyemxM z`#5}WYN zhdVfYki!>oxR1k6G5< zUtC1|3pxMEbBTX{8>L^t<@+R;?~F4L-vzx~K3uN;=P2XVt+gpi#9OvISp2E+w ze1)9Ov^znA&1Wd5h2{W$6@%JL;Jn(`YSQ^rsQv9~B>F!I`|W}revuHzd)Pj2 
z;fptmW|RF6;>(+OUIAbEE9ipD@XbPex8_X7W5){s7c;(v@t4ga{0)A>`#JxgaQhzMT*Lc)0yu&+Ye3_D?b_7VET)gekIf6dKBh=71!TXuHW5UfB3E@;#DqxG3##* z+ZX-_0^*-%yo<}XkjsZ|8u9In?_m1-nLfn%&*A(JvHV?}|4O#Mw;2Bd%Xb6YAI=|; z|53KDpRj$+FNj%C@t?q~j2GX3*Re;?ELGkukd^8Yg1 z=hQh=Kce6Ou!q~o z`+bS+H_7(9j_vnj+&(_W?PCSE50%@;61LA`w$G2T{p@1<`915uf!nvj?YoET_hF{L zo#|g?`eK&vPL}TpmhT#tuY~3M3d{Et%Xb^g*TwRk&in^i|0?s}$ns4ZNBUpE@~vh0 zdRaa<XZt$J_O+DltD46@H{)5{bVED1Fj1RJY&t&{zrf*{YbxlP79nSwCj~^al{l{6q+gShYtpDk(-`$LVnejUrkJBD3 z-?zE_;duhl|8V~`AIBF3UGU$mpI@+kcJuf`WBfKga5tId8)W=TtlwUyPcnTG5?E2jd@Q`z>L71N*Oq-2Z%s>31-GBKP0@ zT>d>={)JrrG2A|jxP9*B{(mO-|8KDT#XO&Rm*wBi^5gH?^3Ym*Hae~0EmN)GTP9h< zU$>?I>SQY(*Qd?=@wp~*xW^WrJl%?a(`60gzN4A`jTzSPCns9N`)%P({7P)}XWI0!$ClsMY+>K&R(?lp`oeuYbNL!<_zP|MmD$oiZo{8vqd#D)kJD`Q z3vBd%v4xk~!Yge2C)oJ6*zkWl#ag~gZFoHQU@kwdH=DzgZ23=^V8y>`qX$h}wX~uZ zpPZ;IRW^vArbE%UV zSh>q+U%VzBinWC!{&)!GTOPSN90_X6LLHrLAuSrymWAT$V$qwCdsSeAcvvDF#!zKo zgD-qbhyqoC4ZhZJfJ4=R4ZeWC)gKE6gF#Fixw)+|Q|UdWi$n2R`LqUWVu@8!Pub#7 z+!y!9;v#cSL`wBkFAl}&kqdt-Yt)jUs=h;MYmaY|N{*M7Xbs_xxE6>;I^r5W;(;$w z*i>s)y{DWAhE2L8U*?gJslt}{hRq?UD<zHkanhTPMgva!W~TG;M99 zQwwa|=qZbYZZ@6)NmIUTDS1=WNeW-w!}Yty-x0DgmJOy#^J?KpSXvU6V|b!ckLK0Z zhT_`NXfR~RF6}Bqhm>U z1sz3&^ypeV<`2g^uvvPl^hn7RTSvL*T!j|3V7Mdhj|4&|A+w5LxTPf&3q=AUExxHe zr1;7;{FhOO*tC4{>c4;X`LyA3d=<-RnD5tpo^VSt3~N{;ul3My$>M>BaX%Fy)7T-q zkhb>->>mEB0erV5492$3q*#aW1WuNOzhv60Md`%PEaM4Ya@pF=AcR-X4ks zo1(tZdOdDg$R8A8P4jnjgko_m7LOpdcAY=6HslMfzXBhH6EU%nKW>RpBEf6$={aH` z=N6uF>qBu|PODvdDsik$vkozm?%-rRl*VCpZ8WkWv{CJBZADyQgApzjO5n0y8E$RG zIgQd-qt&;!*0=fBhN^3M%#Ra-z=pb*|7Omt!$ex{4f!KxGNYJ+3kH3m_^MDW%3nqF zva9PcxM;&#Ur-3PEYupp5gz7@m}Q}kkSVFInMz+gSQpx0&C$Smqivx`T%kFPOGbwKe`AjUxmma&sHEH|ZyG4COs?RF1xci;#Q#<|)V52t~UhqGd~bCDKFY znr7pmCmg}{s$>|3qgOO@N*fuMuZu=+BHy6lWGiP@?VTO#v^D;~O(vdfeASDB!TJck z>S#=Om)T~_tiN1Yc&at8))H=Q6*Hl*zZLuQHggGbQJ0qT6p9FOOw{g=2i9dD6Nz3! 
z+0dl2ywI9meq?XRl!h4JQm#~`qp7cbN4lhrz9BCEwj;ArU}an zw(?SiK|P!W;&@kBAkIH{FeG|6EO!3ZjCB&gwx5?JSD2Hsb(9I$A^mQ~Hti`FzS6$v zVhEn2u%>x-F7vQ;jTA&5Eqt~&)aHvvW5RLk-YZYewH`K7(~4J4QE=3kZI(?oDNNE} zM~bZVC^uHs(rQ;_tyt$WT(+$MR{N~=D3Y+zQ(C(rXJWq(8#q}pXIz>gV_i#f{xgjK zKsyS>{2ifeo=TdeefnP684+%Zmg;hAwj!S#a#=}k20Z5)Mu^gitZ&OI%XsALix7mLC|FJ#{tEM6=n z+1fy(tyPF6Zd)4Jau%;xwX`a*p{3OyiBvX3*RBo4s>2bQq$?Yg%2pADGMH0V`6HWf znZ#3qi+b(R)^K1`{Ze0vRwL$onm@8hQItlE@l>Eo#28M8T!|ttu8m6qCy7(|B=KoE zNqp9vBtA=hnx{%spyqE~8;ynI>)N#V$oCUHui*%N%f?8+yEYtQ*|_02v3JAYZJnKc z`$nd+jYrdVaAZ0jwPf7Hv8FNhab$efZr$1+i>(}kCZF9LnL6(!bh*u8A{;IsoUNFW z=eH|mCd%NlZ3V}T8kt4D$}-c8V1GwullLIrWTY9kEOD4KVwO09ltk&$JhVRiqQdnn+s|wz(clbIY&YWZ6{Gv&^_BVsRrmww4So!|g6{%76R@laX=B|J)vvF2p}z zn@QHmFm0Z9nq=y{>~>fme8LURVsOK@0{fPeEW|KHmb*?ed3I@S%`Vf%)9_3Tm1guY za?eRJ%A?p~m#WmD7^$zxwezHlkq=Y3`D75~a^dnI6{kMo-71;eN!hTH%xI`=*=4e# z()WODc>Dg26+PUZjx}}KHjWjYVb{f$K;Kxg##{GRY`EN8D%QkV_DigZ%v&Z_3~!0Z z$m}i9FvEW-d-qLq8Buj&EIGfA4nK2@Eru__qE|>YrEe)BP zt;N2pA<=o2$ucj@nY($8NI7SSG0!q{aiNaV_p&(;D)CrY&x``ieyj znpSVxOyfhtwk9)++L~->GwWMh zGc&V{+8VO#)Y3Ig*EB=4pQH2Y02ovMl{C_^@SBLr%gf8(TYJWtHC4e=d)D;nXHGw7 zTJ6lLs_8YgRaI32uC1v#d)lQ^IuGa!-&&z*!y@V(M_ba$R zgLe)7RmnXs_%DO4oLtXkFgQa*^VEOKLJh-A1^$e^^i-;)d7au4z>`WXuomiF!0?lV zy(!HPrYyD{L&R79i(n=xcv7kP%n6_sh_CVdDm5Qtw|!BP?O@X6$!xb1fmnXPZ}F<8 z6{(fTFrMEg&CmE#=-2%0$&&B*jQo}~wKRqEn|=NQ@6=eNN`IHX1YhdmBr`+H-4*ypyhegrO^cf;tHxY*uZ z$Nh&LJ>RFEaMK6url|8}K7))?Z@I(Y(*KgNIK{z^{a6?46fd-07sD@}eI;q4yYu1DC5xzjKJf8%cb zh;x3aYaNtd6PP-OB5y!a7ar|9K+8DUZI?-WAG(HpzhM+MjP~;5?6IBJe(|5(ZyWE~ z?3F%YZ`@g>?ef=&^@jcZAc<*0x31FOI6^Ixm`DgYR_|a%ky6bih9(6{Zr#+4BXr|L zd$$FXW;FP_LKBfm%0zO@?gu69hm@Rzk+6$HwzJ2%dj`8m-{pGvG23m&OFhI}J+^E0 z1g_Zy==?yv#KSAQDW}JsbDF*Q5!-F(aW5^h7yra|TYKEIofpw=&ZE(0JHOJep64y( z@^{xPdiHYPrT}-XBOjq~7Z|MF%f*Rg*{AQQ+ch~q%;_=H=ZmaEi^8j(lb`rL0ZME++ z4AoBb*p9W8Kf3kX?U_BvFLa%l=&*j>Y5jT@wY<&ttd9Gb?9OhjnAVPrL}Mzs9ui7q{Bo>)hzwFX);>6= zmuO5jlk<8%=K(|pMICFvwFZQ`LTQsTB1_Ku5gEXTzr(+6w|x;%KQ#GA;^d#| 
zlOMm7{OzlSE^YSAo;69^Yu9d)0lwcjiMA6@pdG%Hr{yz>uJVh+yyswB^>^N@#5A^bv<;oKkE6(%lNg6&$pwgawg4ci-bf`;f=CJH9p^T1Oa>oKW;4q32-{f@OS zOjO&g?`IbGxNAM8Y}>DPXf0H!SfO^kz;778hV6|z>w4GiD8#wkyu&oNPt#pQgZ-@0F=jf8{)xRjT5DH}W zW?t-|Ke+Y7;3PM_FTs6u*tJWY`aZXAf@AGP1h>A=u}j_hy^f7l&~Pvdj@Zrye~A;D ztV6XKHn0<=JTr+7+h8|)Y-g9Y5;DZpGwcu1*^)w+3Bf!X9M768$f z5neXBG`zmufm!!HtQGv3_sSo}*1yjBSDn@o*Lrp1NTT&T*BUt&@mr^xg@PfbE|HD9 zJhI)lGi&jV%D|U-4(jRW%_DMzXWxDvqx*Qi9B`~*9;pXG^XQ(!bc^Wvu65il>-oe` z(Jt$G>a?Cu-qWQMuXWJ14kqvEir25kc1D!fStDS+=Q(>!m&(R1-1>fAY=i5yK52tn zJgM9Q!z*BaK3k|kk#%N2QDvg*#zCL8hNal2gJSz#kf9Gt5gA3c+t6pbt-}wa z!JPGvF-cpyWS!pyhShBtoSE)Sm#Vu`=>29t@tc4Y7UC>kt=5N#rmksjTGG7cl+)KX z|6TP5n^!bn!}lnw&ONsZH-J|yo!YdxrL}1V-zPkUXzF#ioOl@VSGTOGnssiKpZ+Rb zflOV~w63Z;dr8sfXbmQ=zmyW|Dif6xN=i40jb!)-AhUGxD^`TIsHpshf+C1BCK|uJ z@Y4)abtXRKRQ%+Le?urf{Qe#){vDzCYBnjS|3%=lHxz#~_)ka#_!~da`S71L#`y3l zW9Xx1l=J3)TPXf$@Odm0|DI9M^VKoNKdm6Z|062r$w!|nLh;osUEcUNhvJu^o8igR ze;x?Me^#UCi@!eY!a?l|%1HE*2w$2w;m%Rr~RZ=CpPI&qx%XF6ESekH1ZRxI%z zQ2$r33kZKs-!7JT-!5Vk+tnW}l6dD9i65%kB8hiuk;LmQWIqzs|6a&`DylzK$oA~3 zFE3<&6xH7;knb-hNCj`6Pa&;5P^6=gx&cB=IN3ex(vb z)gMW)zl!R&!+#Y~_2b0;H>$rXGF8((D=LdvzS@X4OwPluKkVV-eG2@06skS_Lb0dc zRw({5@P@w(oHHB?e*y5{1^fJkJa07O4gWC11>%ncZ}=nWDiHhmc)uI|M+(H=KHew6 zzJ3AwwQ0l~{%dLq_~@e%Z}`IjF;0j7fC4z_p^Dyw*yqO^_W8Fa*k4&A-i7c-kYJf| zBi``81H1pz)c*fjn8;uQ8vYg%MAK1!68`{r!=)qa?41aI09hT)$B&+`ZulYd@NSSEJ;Yjk{1KzJPa^8XKN z81}94SQ>`?KRops-lpLO4a1)ao+}Y9C0|~DwT73e{QhPQcd*Ets7u2;Gz^)2varoR ze`$uD($Y5CqUkSFyRGItWut~mwftAqu4s*hFH<|3Gc^2jjbEwZ0=0u`YxsMb9{yJF zd{VaCu{hA&A(2=55w+hg6QaSNq-FNIF=Hhf8e=I;Y;-ZWEgfJONnM2CGmd; zyNwB=O11mGQsMA-gXdg@KcojTvlVVDe2(V-X_XV+r|>?7!<;ziyHoqewMyR}rEj^W zZ_)Cip^Pi#lgU32T0Zdpg-{?z~|eJJ4YY zS!0QkBZn=8Xr!1OnZ?(N?lV^amMvE|!HkA{%+d?OHdxpo53Gj5C<~>QHeJQJ7^6PT zYnETr+|s-TqzM_6$!LqLwqT-2*PeO)k|lwS2sTSIzxnUxwXRhQH92m1M|1ISdxwl) zMPu-tE4kPm1Dt7$rG#&7AGzO+9ulTV)_d$n?sutYD8w!hg=QWV@Mye z)W{;UJU>_eNt-lIpN6r0!k=!lMy(Iot@|A0hb)(!&;Df*FltHs97-RuSMxD)zeVu7 
zOGH~{AD%wsJl&sMqx;-`&97%R`ytGt54nO3wyseBQ*nL1E~T%&Xx>RteaJY;Q>m*L zFAgb>$MfsgjMSYTtv-q6S~;K8A@nt?R3JW2H#gf)VtE@2?ch=^Q(sf;mO}#+g~st) zt$x8F};5Z)W+*H6u_PrB@VQo2!p8aYGy%Z$0Q-y5qw>1xeynT``Efdap} zx$L(#r#|VzkBEH0zKUr7@lkzJbV2<6jM$elNY+6Tf)9XPA);+R?t6eL)Arhn+@EGjiw5TEDkQXU%bMNzyR>w&ze%Za)B zR-n9z+ik=Ct*noLc5B5+b@3mLCngGxkD=0|_?frWX1i7!+MqS*Yrgaig6S~8tKXzY z+9Bn{8;rtXxkTGC3XM02+ad~wtlQ>1Zo3|S30khTjTJ48w&Avzwwbk!qqy}u0&TTF$w}?YOi>4tl^z01lc)Ez9((;{*Xlsj1IfzzUVOWD z+%D@`);D;TI<03rtY=tJh^pu-+R;0)nyL8v@XK`_1d=bU;-63M)`; z*on3>+B%sn_kk&Y87=pPKTFGTbBwO#xRFQGavJe1Z8@LEv_Zv~InSSa=-vBgX~4rB zdfoqnLoW-@{0nS9XnFk4v(CdEfce_*%^rZdR#}~Oec^L`Yb(z#;1F96Brk(Lzs?8s zTV&qZF|69T7_&VGl9zQO@|FY1g*^w7lj^sDb#L=~1}o&U#_hEiXkLB++pqxHv@|(Y z^tO=ACS+spFl%jP3yqv}&k(d9gLyd;tLH$n5w~@$9+cmcS$@x&cSb)7yBYM*VM5sg z3qLb(O8!@mD)d}Cfiq_vx1n-{h4tOGN8)#f;`fK*2kWREh>t||I}HATPOZN>Ty5sP zjiN=Ju{z|16&`L9s5d7!cO}2lUE1CDn?y&w-Z8i+R?UeO$B?hxtBtIR&j*r~6Gz^T zDOFmx_kgy+s27GIedk4fkMqKM?8R7oGfG~4Pe|t!)ZI1&6fEu_JX>`|>xgB?qpT)< zTC`G=tgVuiQ*B+EochFpWFu6R8+wQ8qM1ks7&_Ip?h-ZV9kyfjMUk}nY{%NcI*4}- z)r4Wdk5!-jW?FRR4-wgRtsPFUW8F3MP5d;=NLh1k3{fN0>GU_MzQZ3LJ7VK+d%p3v z-Hbo!veBC67=KKFZISV}EzkJt9o6`g{+n5T&zkI7`hOdy14aV+T4*Hmr(p0g$#pRC zKnw;`#-ec}n&b%G-x%GMi5({0H|!UkZ`otnp0&-42ivo@`oHy}Myc(}9Yc?7mo>_V zZLfZtFb52d4jmkmtTq|u9ZQo_(M1~C4kWL56!QldoG&JdHVfuRMi zb&KrPx9|o(oPw|XSX-c2ZQU|-F~`JJ4a5YG3g{?2Y76tITZZ0^{0Hm}J3)75NL!E! 
z4C$7kf}wkdT**XcVO#&~gw4RN12YaZaf((+S-R%N3Vf7khoV%%P%^$X%Jy4W_|Q#jM7~D6MqkB^~oAG_#hW8b=`MGkG;bjg@m?#EDBu6YrWI!fszy?6jGQ`f<%8^rpJW)w378 z^6pdW(zo75g~y&ue7lYgAQG-*+$Qmf1bz__Eg_ft_bH2tBT zw>tmYvo2(nxFxIB;HEj%iXqU8n_CuM$S#?T;V!jb5Wp`}c=Wjtx$q_SYgRWcZduv9 zcCG(S{ivEK7vKte%Q8Q#iuMK!U$?$$XXGpE*qdne%U3RJ!vD*{pDt@^sc%}hF!_WL#*X1gILyzRV%M%3!{F*OINjCy)06T+3IHeqBX0o;oJ2Q0-RZE5X+9g zp{j{xsA$3qFPpP+?eZmR1&^q9nt>>JVo&Ya29ENWGLffthJl9&6?!6YL$%Nof(IlF zJrTH}XXpvwA>xLf+L;Ew3qw67cxulv@Sr8aPe32-U9l(Vuco_2pELdbp1oZI`bJx3 zmFkgBJ%uea1Zt&fqhYM}8AAE-bExo}^}N;m(iyuoqe=Hz zD4nV|k18EZpcuO%#<yo?bD0}&!a*b6$!NyI!NW{5ZY=b??J(0ld6?-Yj0@_g*SJPS&)K1L zW8r5v9A>(^Lh06zp}*`8r5j8APAm!NGM4&XahU0D38foL{T>UY8_ReoDh=v4mUOV~ zVdk;1q-#0Mba#Z(eQylyzAuz+EO^2;*5TA|K`0&H2^l4yfNh||DR1Xtrh7G%ZY+3C zD?dCuR~%-#+e7KPAQ2rUJ@9)EcCn;S|u7CdhXr5g*L4;*H?gQ0W}j)9)1 zO*lNdEIG_{w}sM;WxPKbN;ejIPMCOj^_v|^H__1ZS=H&!VP6sZnj1pt#)9Wvhna4F zDBX1iPZ>^m_i-my93EY+ILvgngwl-#&&NXP#zN1cIf?H@PdQ-{QF(%@OC}M$KvbQsB-%k#-Crr;J`F!m$#$u% zoeh-)8*SBRX!yMvKB|)K8dm=x$#R$Ktx2LmqI1_I+5b=V{3PE@IrqXO(MO4@&q<1% z(47@TFA-I*t{{4asQUa0$>($pKRZ$2Kb|P?2lPExzmH6i?~hE7boXd@)dWe`pyAp2 zK6!%F=f&{^d&AYg)VDic@|icD=--K|&mB+nJkgZ1#}jQMs>WPiMD+G2%O&3*mJ>}( zQ1wIl{ho43{~3kvDU*2X%H;bT*jY?a_1PMJu2kSZD;4-oeNQPRdWfjHpp>X2LDk=d zv#-nzdvjf|^I9PJT?hMd394=^ko+z!ko-PWKy(z*lqm(0 z-#|k0>rY62dlHi0r(vrwLDkp5E@*yR4pjAZG$0dkfB&z-q#$5FP3}Pgx`aTWcrs3^; zfng@7`s*5oT}gyt2l!7!Q!daj#s|X8U=vjR%MzktqA3r-*o%%6gm2LB$4ZEPPc-HI z8h*QmM_|}ILDfGUNAwKQlzTM%c@4jRob=OEbR13A@PFevOMQlZ1 zJD5aMJ_Dl}397!bQ1}(>b0YjM4Z|KL_!!3BX++ic7O?5oDVqz}zU`E?1#BZ{%11EH zh^CxT!1ivZOe`SS^q4Y)@kKP{kp$b*newfK*guA52<#*;mT@@ce6hzn!u{A`c@6YA0ocPK*HnJZw3VIENhoYn0js#r)wKUF)w zuM<_+@X45<3fQs18|HIPhn-^aQ&$1`Jn*e5miOdhqF=zyaIxTBTFmx>DnOhSs8?9XSdR%$^E$k2%N&0O?MB7oXBBFbUDz+36eHZmABD#yHqN9lD zC$OWe=@%9eY{FK|)AV)`+ncVagTFAM3i#`r4g8DP-gd>*BBK997tr)0g+xz+?uAPC zLbh+rcA{s1?uBemxdQgB&nBwqDJ1$K@GDfh7ZUvv_-Xo%LZX$Rdm&K^=w8V7YbsV3 z60HH<3yB_uon}oxqmbw~z)#apEhPG?+L^wcsA6&<(N@s6kmwsQZCpsyi+)ic<@Xh^ 
zAApM90>K}?Gp|)U)He`SK>K_h=vzQ^JyFHB0-_%QKTW@`faq7SBdzI|6%hS{+Nr)7 z{iA?j6SiVr0Z}LVM*-1~fuE+ITtM`HfuE*_-P}*B9qXIW{s}fLR56?&`V87XK{zL> z!2TTl2x`y$z%QZgpCI}o=$l}rtP0qh-2(b1h`xgMPY~?^ewuz^g6P-4Pt)58qW`0I zux~;8Cx|wIz6qj#B&wL2AbK45g~r{_W8@%|w?=F0wM6lW0i1!1)x02{#qDH(Q0-lvb-zRFs`+l@ZS{i96A*QPR}FQb2y$+%lsMzooz5pRsAsxqQ4!2Wxg zjHiK8qR*p$l*%~UUP{!A{!z*rlojhri58=OloF30xF~Icefm=3aaCH%egzuw{vi58 z3ETT`#2e#gO9|1HM2&c39$ZvHbQw`2-k1-kmJofMs1fhW(I3VUeT=9P?~kHCj3b&) z)QC6c!M1Vim!J{vOVIw~*snt)-k29F$FX05M!e@@{1>x*xkkK~qWz1B8qog5GCwaX zX8#P0cw@e;DJIy&Zp8cFb^iM|w11I|W6XQsLHifUc*cD8ZM1(8@3VLw`wz6g&V$%z ztU~)2$-Dx;1=p$n0`U25g+!l4`xgq|#yoQ)+P_fvFy@ypq5TVmufoXK=g|Iz!Y46b zq|p8a_$dAgzJ>NL5IzL{{{ggr0sBQ@zJDRwzd+_$@bk0K{t1~k!Nwl2Yb z7aH-#Is*HWTJ=v*1AYK!4&VpklZV$g_kkY}?{j!v^KP`i>{BWh5&KPO#2c;(z%S27 z`-}gG3YcLpPEdX4apIT2*de@2!^3KC?RN;5l5fXuzlNVwyLJaP{96sbs^RA~Tm3lKdj-?G<=nYr)juV!{=(aL&Fzo_$wOzu!j3I zyimgf8gA0?h=xC*;mK-uZ;OV%caqfS>l*IS@V{!fRO9z)ctqjjju(1t)9^Hei^;c( zc(I1jn0PKnxSV`DeV;%$A%36ntVI|+R~1RtYq&?l|El4R4{&t)m4>@C4Bw`(gKz9c z{^=;9EYj}o+r)v5|8peJc_-|Lcmw)p%!GARzKM{dPF#LG+J8*@<`xOr7ib&t5 z>CadAHihq3_&;d+e^U57^_=qkGD*K?ij;Tzu~Pnk+SB~K!tYXem%^)5fuOKl>=fe6 z0?%HC6D?HuDe$vU*hY_P`TbgcmzIyd1Nh@u01?pz?~w9msNC0hEl!*RC;Zk073$#al%s_{HKz;w zquM_%)&6n4rr)ON9fjYb@GT1OSNQFKV{fnL3imLaXtFBA-KO-ZQu@A6@vl?*RE-z> zUf2Hdl=hGQ_X~dW6<&9OgbylwvhuH$fG3E$RDtlQ<4`}Mc?v&U{doLB>tC$>r|k^M zuUg0FA63EdeC@w=I)1NGc#Zb&MG9Z1@J|DtAUYm$G9tQB$M1H9AFbu@(DM70{2}LZS>5$B>kFqO8X3-En)bz0(`3C_m2vndXB(94tRoC=K#-l8BVlF;qZrq^etNc z7q$F8ZU1+|uSsDW{Ys6Xesi*vzm=U{6U|e2mEyNc;e86f25_uN)E@0K3@55o{10gP zC>PK7wfsu0|0d1RJ}n!L!wf`Th_3u*nziauIYWaVs z?f>v!2Kw#S@)KJB5iK9` z6~K>I{@I}8b6ESw|5f-`6n?A17b*XLP~i&|zE0s&75*CF3GqpZ=OKm@ZBh8gH2tgE zKBs8<9a_Gp^VjvtpFXbqXNK~ppD6qRg?~ojs}=r&&Tq%5U!!j`oM@ZE+0Rm88{MSq zj{01@PRpB#q{kK}zqbFhgS4LEGs+4!-L<#rmdKdBxz&9y80rH6dO3eEo%fjb(m)bP7CJP#WiB5Kg^b`9UB`ESzv|DfSv4L_*yS8IO% zg!+~dt=9V7sQ3(PeL4UyY4h!be*o}Oq8eS#uSNc)M3qoRhh2Du!)`jB$$*!%QJdBu z=R62+)$rFfeT}C7xu)Nw=_hEoQp5iY{7Z>?HU0-Qyh+2WG`wBIKh^x_X}Cn;t2GS& 
zaL8|1!~0NQ?EiE>g!5X2VYeL*e7B-~>$H8J1V6x;jneC6@Ygb;txC^X7!PGcixmD8 z)(7L-s7=$~i2gXPjrukGcbXnf{qTGM^L-gnmG%#umva8vekX!ol@ax8`=5>Z0r55b z9Syf>_)nVuBF+C{@Y6D)E)D-k^Q+VRw}M}kk#C0`{wz7Z)@N%O%LDxfe$8}h;OD(D zzgK@6ABAr&jfOES!|CBKE*$>nSo*$LxM@N({{6A=w`2K#CWh~$vHa>|>7m;d#^=7H zqT%ym_1zT9|I!%zKXLx-Q;+C#vHaJ>(!w${G4LA`ic&k{;^r1>T}$(tl}#(uhm@s`$fEakjfap0AB_P7pJ-&`h-}G< zd08`?U1JK#b}94hibknj-@Dl^ihB zXL7<#pUIIv>kz0tOWd4a+q@2~hb|TIi{`seTM>5f=JzSRLmjPr0M;wjF|dD6+T7!f z!Q;6HRLcO#*or6cDU}H^G|GTvrOq_m)Yf4pg-k>LGXu{rSL>M#QFE)DJrBhbWIyVgq2T zSQ+LseUll{fr$={FKhp9jgGf$b9@NRY+%7N8zew98#IjR>Q&cCHks32sPs%=&P0Ac z%uEw5H`TE6y5%dEq%Lh<+j>oNYSo(5rOhqN));Jj?jy^Y5GpZ<;1T;6AQ1yZFt|K4 z0qD9YRF2Q4jiCzei}9~PNpVQ5s64BNpwK*#xcEG9ACWw9VkiY{+04NKCYdm)zt742 zBz}8mfWpNszLssX%v-hMQ&+EAxuAKiROroh!7EKrJMH4;mJ3$3u3WNw<<%FgS|f zqG|2gRLiGUH>Z}bT)N8QX!A4kyL`c%91FB88J}5`O08XTEda6{Q(#HIw$BGX3e0n{XP@x+^YQ^%!pR%sC&X!*&?U1?7`_SSyDW~OcQa(q#8n65-_G+BE zdfmFz>gF|TSFLPXvApF|scUP{4>LA!!St%$a0A(uagVDLVpj2$i4&WgLe631AWZc+ zQeolXgR*fae|PW4im{!{2=N6~W(+@znApmckz;Pm%*baaW*W+jl4ah>A4w;ge3AT_ zC4VHHO7cVs=8nurnS4JZP98p-88^33>kwoQcb^=&gu79WC>gyzw1|%jq6|VoSJD+s zMy-M#U?!BzD1Lv`Vj$?rEJ1O;;Qha`pqm%%iy%ZmQ zh*FfH~Ord|*(VwAPGNYH91r#i^9Z z+8|MTPL}i;tTL27URoBGmp66>>k7rb_`(ZA9_?2&pIw{r=^S2)&u^JKBb8dd3QYvz zP|M<_samq^)Y3Ig*EA!_D4Se6&6rwU(ny*3O;lc9UjE+NGtR833ZB}trcXa}`Z?3^ zae7T{RaKRMYinxGo;IzjYWnnPGbHw+a+rvow+0q&Tb4JkJsnpuY0t}lcpX>G<@%`j zV#ggjR#YIq%S5;p~`*Q>g{k!UdVR&NOAoy>aR) z{9U;F><)WVfm7wsGN;NN& zVIg1R^joI+;XVzXWt!iXa=wp-`l5^b528T@df|9cJsO!vv=ufF55}{)Ju(cHjO&t8?o;*R!r?gk8wu;1N__Bf53gXtAi&5HEu&~$J#b@fo5ns>WeqX zqmtcxvLmGJY|_tm*Kc)p1wIX2_%*Ql@)gv}PE1{RN2u=!yeo1Z9o-%EyF2T5x4lHk z_Ez?N$nI&4pCHs?7!C%9#kc0@{p=YpQ~%p3rfrY@!@9!vrPD-?@s)Vx57lnJ@q*;6 zf7NLnajjQ3jwD*obFGnc5x;f1@99qc?WyBCcD=}b%VkAa+dIm3cH71iBYWkOyp0dQ z-5fpaGtOxp7Oz=kyY<7iTV~JfVfT;8d%7HJz_kV(YrkvlPu|m|K88$QF2%Zj+)*Fb z%2_|+v>r4?NN0tUT{wOpK^|Q22z+b6aR)qgNUt8SU27mKruD<@w9xdisxU@g{XodK zFtP|`iJGD-SNYzOg0!Pes4e=|JgSaNiR;?c&=K6rxYy|@o7_2&Dx@6!9X 
z$Kt+oLhk?LjVqM>#CDY$@jeFk{blXjh&S%bWAQl!_vxjc6~o!;5tNGoeK8iJEoWaR?`ffDfLN5H2Bq-E@YAalZ`Dl^U+mMbGCo zyhy|EgkEz|o4?MyP{X)SjpzH&(=KWw*bBk)PUvYDwGr%y;Q6w~uhQ@j6b`#1c-Crq z*crj|J`Ka(2%g_+de|MobG@d={cb#bj|q3DwZ5N)o^}bbJ~1AA2HZU3q1RnP)TZfS zs~z_sU>5|>=NR6`>wG*%>phj9D7~*xIPA6HnXT{}H2r>s<9<6Hj8o)q*8Qg|ykE=j zQux`L{vn00*7QRP|B1ps&h=~a7pWLW$p4E9uU2@krhh^Dwf}(UhsZ&niNWuUg})sO zgAat$V}osE(!rkaoQs=VjK)&x@=NBo)J{vyUB0#@wY;TyO;gLNHFIjuWWO**Ioc>e zUK0JDko<&qVM#yTlYEzRp#2!U3KCW7EAdvn&Fd2w?~Trn-$QrV(}Pr~!7(1c?iIf;K0 zbPTR~lQaV^nV+gXXX%Qjl`B(CE3RI(W_io9Yf>$5mL|j{^HXQmNbZZ-S=m~VB)n+_ zeBwG<+x3>12 zvuBDgPMP$5qvdMXQZKwA{v9H>pYmscvZwh7gVAhNM8a6UsBd;8-oc9*p zI3YhQsj5m7^fHRbkfUC`cyUNtAD1hdd|(t2&yy*U$L4wtB{$ys2}x<>#{DY=zu|Wz zM824TUw*l9)h8vtYn$Y~XqDD?a)QcXdod%w{Bq;1a5e--NxH7>@8x|6t)80TE0v}l z%#%tjUfZ%}`O2&HL!5rw9~S`ZqTm_+NmLH4#rRJuwRqLawJj+~w(&8Z-_|`6)rk80 ze=7Ag&v-V+6+DNMYny%+Ub}|7Y4|%H#m_GwE+?;2p{-sQr$tW+{kENwzUvupirBUnrN|p}Xx_n~b1a#Mg#4kdu4$02)E&5Jh5p)^L)ETzlVk0)U8{p7?a`tjb?5)0%ywBi+TmLJkp4?V#E~IEMQtDz9kCuzS<&D*(O`8n_}*ynorAZ&o=y)gNvG3; zXX4)wf^Sc!(?d54nj)rNY9zFk+!<78ZDiOiot79I_QPPM?Tm0dNTi*H0TxF)du*q@ zGM!FyU7YTXFH!QV-EF@~ba)?2r_-JMAIBFreFl9FwDG)!>2z9BmYS5d*Un$v2l2_) zGZb_z`L0L}zPpgfZ9guZPICupuj1I&j#xa~MK@w7dJ-oXTwX}XZxJU^LA`OrLliHU73^BAuak?F^2&5yo9dCPakL_)qfnVO;9NBwNvV3slb)f#V?c6=gzaF>U z&HT&t0Adnxl_Ds3?VOpx$Y~#V9i`bW|FE6*)hKcDrR{A-EG9#H zA7|qAn#%K80taokVSwZ8Np#mUcaI|j>H6B#0W*v>|Ime|o%aRp+!v3eTu57bpSHB) z^D>u*q`hG$hAlXYj1%cNPOrotvU7Z&p5yy_a(qt?e&W@1dhp|~rPG5SL=U-+JB^g7 zJ;diDFl>wiDlieRefU+7)*PC(_aW$c`ZYqNSj>@V!Y)auUxS20w%dN%tLe1UK8xF+ zy@u1Z*B}%t-j%No$8kAg8Mc>LBD20VQvJ4Izb_lsViR%3P_+R zJb4tO5G%7ZiozyALBD20;fe?f^8|%RrmlQ7C?J8N@Nq#QR%T@sg>{00e$9r$iU=+|s0+z>%wwV)8m)RnIW1td@uU|G@j;$>Dv zQLqIC{hAGhnqBM|M!fRjTlVoy;B%$9-5<4$R9*7{vQ-K#P$JGf!`34dwY9CKB zR@xh+oG3~3TLwuUi%k_2mX%&B07vPH1HXgRt{ zF{tsPUMAf4Okk4ed?w{YNuu8}NOD@FRNftUvB>_qN_z>CXt_h<1Cl5W3`vH4l8n3( zu-DLUCP`mjlFW)A$4hZ9T8^vBYDEnR6t(1dCW+3kQcjd4`YnSbb0ekJA>_p(W8s1C zrd%Y^a-S^^NTM__B&kBD2NLbWA(DiCGf7H0R<0T6iU@M^1i45#u6#A9A%UU>2bADE 
zGTBNwQIhDl43eyfl*+p?FBTcAkT6M*M9ckcSwIq{fsh2dMwO-nh+sqa3O01XG-%FP4+PUt>mw1X_3rD(!NXw$nc2)pUAy`|MYtg+??8vzjgk?ZsWT=kWl-7G2Z{ z4JNVIhb4BAG>__X#6^$oNKU!C=XuRC;qwO%! zY^VD`vNB=29w_AA4GPuWTQXNf9CaNJ-unvr?jGmv?XRTMbDdqThab+P2a=T@aygK! z+>AmGBrE?3I37q={tK#fAX)hxP~bqa@@^2U(?d=jJ=(>D_#3+tjm8aayfzu8BV9T(*9U@Jx7%JUn(aa)sklC%xo!^SDkC8?*T;ffyKFBO&33IW zKi9`J*Xf~LW#DAyx;MzR*DQwoXrg7QxbD?le{L_7LFeWMotzs4?^ro9d<8N(seQbx-{l7F z$qj6ZiaAGtxd z<_5Lr2Cd2sx-2&c%5OO^tj-O>aZ8R=iQFJKPRo(%XSqQS=LSKOD@TT3$PI$NXO2`? z=LUTwH|PVoK_}$~L4_{|8ZUtB0X%@CE$)*q_qr!MpM8W&Hd2 zfpmKCK74x}|5DGV(}NG3k{{DR+9(5V{XwUcN$;{# z727Mb;m+1XHd119Cd<@wS`WI`!Hpwy!`s8{;&i(W2WLKUyR2VRr}gU& z>sOuDuVzup+Yt9Y_=2?kt4ywS5PL%G$?p$N@}H2slWt#uYvgv1w7S^ka?E9ftApMY`3n`tv_fle#CYgM(l?I1P|~b;p4Vr$rej)Bk2_) zoOh;saS5&um*D#Fs~^Am?TtI@{5wSR{1KP^q|fS-mAtiMXo2nF;8?mb$}$AmI6^Ix z!+kvR+p~7?Rj+{DwxgGsvJXiu0Jp6?4A1&L+o|uhz4{%Y2EpyWJ)QM?_zZC|%M3D@ zfo8OE=R9*0YvW(+xI<5WbH`mxNF~8(*zeZw-}n-@84s7x5x(27Uxv#Ncg!K`w1zsY z-*;NS*N!85-1U7%#=`xA*{15+;Hm+4K&KTr+D?7HYwgcE zq=94EuJ-3%Nv9J-CGvwhIo#3yM@Sp^|13)0)G2#G2-|}0ik+XQpA0);u|YW6;PoXC z65RrEhY`PR;cR^ITPRb^7DfPvSgs1-kbH1rNDvl2uYpt(d3c9B27(@e2v*Jh9>NzK z;u~@6;|vh#nc%F#JH=Urcah?K(Vbu{qC3VIetF_$Vo>4AqPKa5k>2jIokuguRjHkp zt5UlN_P*3k$`z>{m;0>}E?21{~L|A>~L9DNgyo= zAEso{bytZ5kG9FeH*ud$1lg*%4t0A zD9#?KlGDD-q`?o{kpm4^4@Nvbyvazj$fUs!+iCBSa`@$VT@sSlX3oLAn+taz1m3ed z+dE!Mr{_4ou{sjA^WZ|xWRLCKJ&%7qWxJdCm+Jv!5VcLA_+dM8xZ!$omg22@EuB`T z>TTwJ!RG>;7(!=Ap&^Df{39S*oyiwJpr$0~^UIOb5YOAjN%ah7kF9z+L$s^?Td$-cPDS@O5WOCC8}&CES$xE@ZQaEKF)dKO7w{9 zAz#}IPC9n^+=FKWZ!~8U{(y%dQ~MkQnb9K*6=X`Udq8{Lg2JHlqe``pgx4YMGYswU z!*<$jUP*ZTay*>-NDVf2CmgMcFsgw3H6{!Eu$}f1AwRzykB@77^2_0kPkyLHox#aF z+wq=7$<5tue@e9dCeij1wVu?;gv8gbX^9gPp&zZ73STo*;cIE03SUcODtwKn!q+&r z_Ol=>j}EgT)}xX!3=yu<6Czycc82Gu2v_L&n9>sp?1q`hK-B1d8$p1pc|w32iG)0R z!AUS%LV(Lm`M>-;H|)fUfQ7A`h2#WbD@R-@qLyqRI1AT|-Qqo$%f&Rxj_%*25rj?`p2B0p&PTKH?$(h_sep8zi99|NOz{am`)FV8UNk^W!Vx4 zd+viq+gkj(1OJ|Z#AhM?eIEZl1ljma__qh~j+Y}FCX03$T>-**yIBGHtBsjG$(z1e 
zNVJ=eF~Zl9c&j*hHk{iQ2(st0kc-&K^M_sc3C4(t8KaOkleMfgca5dH;v zohOQ*23OBxB*2|nQ%1NPZ*$x19W(S6h96xO(rvfCElOCZy1SmJ2q&NomrkcymVt@q zDQ7bjek5dZ;wA~1F>4nN^y5gKB*WC`)&+@x)#$aNt zuGn&2znQ5~MydvP^SI77?nR9nYwEk8^TZn(Rl~+nbtF?cs0Ng`Ge|I4G!A2c74Mj9 zaP7UjnT>eLE*{**yaHT<)d!ht@GsW`C=b^V74_&|OuuO=O#6a5_9cos_9fA1x-Svr zZ*v6s6Z|t)FG?>NGk&k$&8kY-$OjF}p%D3SdagKNOA=`W*^&qbcPIv1ia}2wB z5A*I8&8P>39X!aIo=mqgHo)R!)IWX8i_IWprxTTI5iAe+ZcyrZ;%vv-i?eSWg~Nudh{w&^Oz%HVF+4UvYHTZ+n{uAd$v&7&^KOYA(=d&Gnnv zrF_`cYE6q&i{)%yEOM|Rz^RSPNwi`Eq1$NTJ1P)9zNRZxJsu)n)Oh^TLmX&l=&28m z;%?y-Zadx%U)gZ$_j+?)+xSw6?RYq!Nq+Xbe0!+)`%Di0W;>7Ci;v!9dqoB5{@v|+ zAvp&f9T5nHulnerM}2R`iO16{V#oimM81?yt`jY%b;dz~_`dxYY!YGr#*u<#JJzi6 z?F+77@rqsUVPX4HDWs5B!-#t~kY@G#8~Ddl?q>Y(_o3|^(~-lGjLkuo^A_B_kX}1G z(!6{f+hk|+;~@_oeS>Z8N`9rgRD`x~VT0k=lWQaApJ*_?Kke!((qKzWgW*Te zV4T2dpUMp#-@JPse-97PpurYI8jQuB=BBpaU?L3F215dEu*)ZmvcVRbD&mLj$Te)& z<2%}pHzQYrO)rl$*v3PRurKjdkGHU?a8%AsC1>VnD&7~n`ieBwT+>we5j2(ANatuO z{+_+5U~t6tX9SggOLl%!@mJGSNT5x1XZa|b3U_?DigMXkWEHL_A`1^Pg!tH6jv(sj4F{2g{Hk=RJBB!N^G8J zc79XwSJPBTpiNa&Hp-@gQVv&9v~y&C;(D@8@gO71+El+D7ilWifsZatqVv`l-sq^p zu_e!J1<7_;H;K^z?BmN2y)Nf^+b;DZOPI|t1zD4UmR(q z>+-X{o;Th|SW1{i`azaPf(Z$(huD%pBk`eutFH*_D>99QA3-B=0;io%6S6dtoHB$P zX_jfETl_}C(U@jrS_sEi+B~;t^K=!Daty&f2vPfT_UyUt_&n{|eQnvU_7_ci;sn&|h+(_j1 zl>oPd)*1^%ZH)Crx|eN6?;fWF#sCS=y7&1hbFNyMp%I7CSqY!)I;JyR<#43Pw3T zdrY$vuoJQZK5Ag?LMZ`94JdOE$NkEe;-L5B2Epq?j#Mwmc|x|bV2?jX zsz-B!;JqM6s;}k-b>svMwPw$4=&BswKb+(Hticy?6YV^V%$>M(R))J~U&akH3%ALJ zaEI*kP%AnIibGH1tm7Lv(zphMzlc}X+T1cnTsE9emmf%{zyEwXz4X7+=}9l9)Ax*| z(+w}D(ay1uA;c-Jupy7Fmc(Q)m?Jo`OcoK-fO-Hd<#?(B3uP$G97;qq`L_5D0BRPT zspwmD+fKdM5Of>1#ZKDMwR9$eY7|5hv19g_eMhScYEKY5I;`X+CIYP<(TFf3hc&za z!c|I+Jy<5AvP->{4diN!m`%rsAw!m&WEwL3wH}M(9WrFtiO=U6WTB&_$cl$EMHYmD zkyJ%@pP)OmTnQ{P)sXU(3gPg=l@}7SwR)gD&twLqN$;o$&>X&i4uExA8XD>UXT$hpv_$yK(1gETTkB5sh>^Tjyo1 zMG2l6g-(qIkB$bDgHK}Ja5egC8vXUlVG zL~kRaf5R@}KNW%%KRrc66Aa{_1|ET1pq7OA(dAvK zMMPvw$}4;Z!A%#@7m)^VwR{iw4l0}mPjW!>CxFB4n{UVbJL<>tH}G+EcgVqKr2Mx! 
ze|Ua_^?X)Go=rrb=gRx|86Mnn{F2F!p>`}lK;BzX0k#C&3M(}?t^Z!vB6|!^L z3-%dA%tm#Xep}KxE;RjsMHY1mFwa=jMB&-C{ zd_x-3XSi?Tqu)>P;}{~^2?PEx|M4**+KWI_g6Opb(Ln?n3)v-|Jnir+hR#|~V{6J-kP{}HwXCUt`AbKxs-~%$dkOnoRG@S^%4SvXo1OlhW2>c7u{A;nK z=|Z&e<0Q>55%?SU9wQRSd=0#SG!KrGG?PHk<4YvXX$Z_Lku(fhs)37-B1-g@NSdn< zt+rIstVdvOsia}Zr!=qyX?|NOX}*VOv&$sS;|Qe6Bn?Au*1#aryig`-iootZRxW8y zKwwR|q+!U{H1Phn5hY-g@N#$%M71xRDA9h5R``Sl8W8Qv8u$_*T^d*o$a5NK0_2zq z$-o7~(!l!wS)qY%0&=Sc{u?y@fd;w(`I81724r$la@z~Y$w}dn{H##Oi-7!nl4u>e zDIPP{-iq=6R*$RysksYt&-;`^vs?h4HQ-Kcsk&l1t%FvQGVf6nv|ex|LB z3U{z07T>Q4eqeZ<0NnpU{d@3pk504iFC|L6tjwta(autS#|Sw%2ph4PlwDD(>&SchoeC<%>`XBDyukQYjbCK%5(W{m8oLY~U} z=>R`413imL=aV{Xq!G9g&LIutn;LirW}v&_PSwmBKN5c|bOtQ9JXJt+i-DLjzrTSB z?@3&ujrCm&TR2q3J>UTZ&e2i};kENaS`b6lXyEv9M2W`}l4d5NT`L|@=_3eqYWpw* z*4BXba-@Nm99+2kG0HirNY*wRkOnR~G4C^EL$UP#FC&dpOtk#}$2uIq5aQPV8`CZD z1HP}|IJEFL1>!;eTL^5@{#ycKq18dtr$NSxiiqlo=wSrDT_o$M=Zc9E|6W7{m%IE# z*HLEtV*SihJ0=K>-|rLA0tDa#7m!s5%+-cx$fq>WS&1?dMDIvwn&ARjfc^{8Of4jW z!y8WX^&)|E0rGR$5HXOs;{@_cKyDfrg1lHFkQV?s1Ewxans1Z|WD;h~Uz8Ey!Z4R} z@pyrp2FQ)$iLNn_cT5z>Y(PFdF$8(GLLgTFazaw*z_|_T9M^(n$4F9EPfATZ>07%u zpb_t3B4ZWD4}QFth`tJr0-wfy_r0?~R+0Asg3CCk!aEoO8w-hwOgH#!v2=sy%7_YI z)}7M5zQC5Wu1a>-}>P_-?86iEGM<68Dx8yK?3y+9!U3 zs(>vJb~XIqM?*r^hIB_l#%lq%(e4D%y}D|oy9;Ez9$SdzSpg9Q%lsIgb4uaK2uCu; zzUFKAdLh=g5+5|}qryu*q7z0ZXzGa@X2LOHAr`^t>F_N}rzhC4F?|^UYyrTJcVT&r zc5gR%M(g?3<{y3_{n!E2?t{{zZ@lWtA0|(L6+I&zsdU!A>a>oy)~g#w60PUC*2uYt z-#R_+!$@5k@qz)5?Xu@|+dC>G1QSn33}@+!aZds%p|SrIyw&Z!ACpg9s_V(X@vN2${RPA1*XctcGPHcLHlH}(B>QlP|gJt+NfRj1cT&pteap?ZBF+`Iiv>wh%>pusT z*?F4b5n0BlX{#XK*jqAt+T4Bm&oMH58*I57A!BSC+T`VBM{1v^SB;i(;4d7vu2d8S z%Is5maHF{UrSt6W!E%>#STJBIjTOv!TqX`$ zc`}XX>|>QT1{{cKKCs>v7`05^`6yge=AW_O%Di`Dppl^yYV#K2nx_{8{J~j2x_IF> zHKQ<+3u+Q8QR+6#{Re01YSn|2nUhzUw?`in?V7!OhQzy#;j^QFx^C$x z+qdznq~XjB^+OoemGYn)3yQ(0_1iCl2C^0c>Rj=l7pSFSB>)EOaCm+5R@XehE5e~h zi=@s>Ia+5VEf1K@VYiOqb*ABbCc89w!fb!P6IpBeBZJ#P6oIUMRjxqO^xHGLlbgFv zOklZb4Y<~TW9@gX{aB&$;?mU1@7e6lHKLRrYC}0P@AEab2BhFVn2})J2CIJvOL+7` 
z7~ZW3aXx>P1S|Kf<9bx_{(s<3hcnaW>?1zk8_mdkBO@RfN247&KJR%8Ia^M;EImDW z!c9C0YfpcZ$4^H7!AJ_vT;cHw)|J`kTKkgsbcIHK_RJ5z zJ#H}tX5nZ5Q|;mV%YKvllTszRN-})XpKW6+v8;`E`1C#Gd@<@(&|$AJ!j{YbQrA-9 zHWIVZk!9Y9F8;Z7LJ}=|cO8rELTzYfGvVDCdF5O`zk!(OGO{`Ov*8!zl*%F9vh3Tp zu`G5&>!l45J$bM&7C!Le)3eid_uzT9H@h@#KLMM5n{3ajgbw@Q7Kk4`Uc^Fd0i9>Z zYLkT(+lgeq;X7FO*NjfDev|F4Z{xVEr);7_{SgAe9^0w!GSP*Nay<5J&Mwq+S$rSF zs(8#699R~W8YHnigO~N0J)i!VA_;YQqvpseh8`a@PrVSTmJ`YOE8J%@`55rUZ7Mco!?S?qa%>tz(>WiHvUU#!XM zMmW+Qj$RXnpr{+IQeYc9^k@}UpB}_Pt}#O&C6k(AN1Lf~xilLqhAXn7KJMuySEEHP zms<^yJT{r;J}3&}kE@cgscnbtS#7pk-(fp!2~)niYO-{f?QVwdGJl1+V`z4E@_*Q| zcvlY0#q4Rcx3R%EQ|cDmV^!@fw!=*=-+ffYs2os*jS7=!TeeQ1-45}j+o?r=bSe26 zP&LtG-)BzM?Bt!@ra3gCbiw4!*T@|fAsctgWc#~h#ckHp$(#R8=rL@2FzXQ>s?hBZ(^-XX z;St}ZMSypN2KfN*-9d4Or2tF`U7d+{7q_7&drhDq4W)NzTz7?9oGH>Z^f|eDqAQ3T z>-INo&+E^6*su;`2AJ8iw%i=K@qQA_*+>rb&@Hkyy)syj`LoI{t6%0ew|=;@VZe5t z={P1#K;E75&N?(-Yz(dEWWAeZj`h!EHi{n{zXm=J!HziS1-$HLvFfnxHH1N|b05AE zprzTRxH|Bd!&e7-x$>9*P5x|F(CX!@1CNES4)ns_IMmP|(=npgL-LB8+JnRAvXW8CoZ($(ECx1%zKh%uz-Jpz4$~5GeZy{HZ^S= zNr*pU>*-GG=~>jeRjw#S#lW>4!+k!@Y3{LX0@*kwJ-eph7SfKC@IkYc(MqnJms@P}}R<_5>^s&{C*;@2zq z5672KavT|vvq`1+u3;IT8Z)|;I=BJGE`}{SP z?KTY5b++~!W)-HqFZYkHr@$0U(`IY0L-Zev1hCfLp>vQ8cq0rQ0;jRI!Nb|xAdmd; ze*^uqtQ2!;o4rXoB-rNV*U<_jtmbUWFtYFoR%R@-!8sInFnyf+Wkj;hQ}QIpOS)e_ z&+h&sT)ZlaUU{Ar4VDajeQ*sffj=(!N2w6n&%A+Nn%Q$qMUV8a z(4qm8o!^80cl0k`mY|#`paujLGkcQ%C?`>1HGFDf*2*o#x-yF#q_YUkY)zw+fB39C zJZGA@GT4uW=GmA`B)sAYOC|z2Mr3^i+`u9~<`)vlx=Xp53$c@^gn+vgm3erU?@?R@9ZQR99t94Fk3Z z%PYHW2iva4v7+au%VG^d(`0Cu@d@hrr9&Tgtveie6=A6rmc9!YfG1R~@ z_i9Yc8a}d2b9^hp+L_Bves|*E8hQ2*`y)6La;$ehaWQ!Ech{P~xnU%Y(J;pn6mhE4i zkrajGxn`}aFz8A$*VSp|j3XdG@E-r;->Uqv?J573#gq>W$zMKD`L`mtqhp zQ0rv}?`CBj<#f>*A~OH^OL3ivL)vO86%23w!x=?Tff+KNdy9lHyh?(MsNZ%Qw%T6n zHkoHSWS;2=<{1FYf?;dOx129y8QjPgPj6vwh3rBP=V#$$5K3<0mG~`#xQr^6Dg9ir z2ihvO!hAXo8T&deGJaBh^GPS;X$TbF!e)AI8LBt<_4#5*ifyu0 z(UW|MB{daU9U{r0F?TvW!pEA(ZP+eN3P;Fd%a1U2OIFKtO2nv#ci6jt7P;Sl;k^!Q*873I7jS7}wh^4J4T55T% 
zucg)2*bjYa>m@3c)LO+#EB3W5tv3+bqOYxJt^A+0*E(m;+>)G#-}C>TxAQ!(v(MUV z?X}ikd+qz)1R*>st39gcV2k!X`Ot~$sAPW0q$7F`Yh(OVOXG3&Q6H-t@q)z%#5TxJ zksY?$;?(WgXT^huJfX&Yq&xmZb+3hFrUf(0Fr~-HE2Ci_yf&^si}D|@XfL>5?!p|I z4Apt+&79a)=L#d{x8wLr%g1%iS4PfX;3qC=c%rTol*+?5Dn>ZJ@I8jIx}>_dwt1jk{62!!MA0}$-b}DJ z7TuW!6A}2DM;Rg}dqTU^W{sBquX=)>)DzRx<;Y3uhC_OJm#=kHFYhvE78eKP_D5>e z6vBxR-|ZG|ZNmk__r5Qs-8SlfDwun?V8Y2|zf4&82;BvvZ+4Vdcw5%0Z71@eV)#`S zxv*Ka5PW4bHx$;kBGo8FHYw59FVcIuypt$09bHzf2=lGgbnmq6{Rz<~v*pd6khD3} z`taS=zx9N6(O;%<(4&ru20tjN_Ji&91i2BCwura9c;w+_MT}miq~3C97yYG(DI4L3 zB)N1>tXGc_b1Bke#8Qg%-jF>V&)F%cx~h^8PW;>~7S)ec`#sc*!RzMxfP7gyixFNz zp~(}uzg?sC6LtPZwHcn!F1q`i5h7{>E!SF>ZmSBXJ)y#6)f)<(=b^h!sVU>D-h*=6 z{VuxgK0XfwAs$pi{fZ~V@63jF@s;p=52Bhru-b9D)RUD(_hgE)sO7U#@PvqH>TR+2 zZ*f14J231T5=}m^+*XH?*L-h1NN!JE`mFNgtm*ztVn*Go8SgvYs~PWm-xCYy6~-jG zv2dDNBjjXa6!FGKTuwaJiaJWDqCGsp;=|U+jfQHp9X@Kb#tB-I= zu6X&MgYjA9ctW{TXXB(!`V;56y^a{$s*3gh)_C~;HGT8ylCAR^GjZ_$=kadzdh6}~ zi~9RKV-PjgSDTWS)2N-Njlu)Qy`C1;4)3M?E3%JTH4y5~+wH~o(7ug}A{roT8B955 zBH9`?29LN?T|^&S`K+6J2V5RCqT?+-9n{ncGR~{#ctvRWCGZ<-y!l z^dS>{uYiQPor#aPgG@hWE zE|j?(P~SE5xH|V0+7&+A7E*7~NFte^OFnTkPvwT^`>X?n;ZGr+#>;P4hsd{Wty43G z^#2?LQmiZb%uYC;NTgjdqs>?3qff-qEZ@m~oXJ))AD|=+M)B@+{5nmF6EbfM0mw)SaQ=g=(KMf zAIQ|5{upcAg>j3x`6^Z(g!m)vN<*D9@-_-w&qCjq?)s2c4?Yk*rioxI{})bCy<(>r_?9*norTu!%~b- z6U2faxwtH-<{L(IH7;{Thg~DM`~u4nys99}5%TJjC)uo1Qs-Kd%*`*U;Rl!^-ZYA2 zDdt6^6Hk&?z4ah@U^U^Y1ZKId2aB4lwH^hwzWWn=zcRu1f&||)6MP?if}X-WaWg$Q z_Za=Rj;3%&(SJMWzkWQK%VosIVeX~~?~OHUsWNcYlmuo+ew2sEY~4H64ydat zY#Y!Ts|xVN67<5~7Wwaf({SGzE`RQFNCOlpMWdauId1Y3RR zHoN@xl-tH~!*PsOB&-)D!!=B1!&~?jv51`^62Lt?Nd*SIQG{)!N>Ut zTWZB+$osc1NIcJKoU(!*Sc)xl^9?gjl$k6$#6Y^LmKt(}}}@^|~oF>kQ{7 z4#uMBcUYiNQ(_+2#8G2kT%?Qa8Zhp7(b9$_L4VyTvznKklOOz61 z6FR$YsDPgs%?2PJ1wQwud^0<}!aen%lT3z}pTppYv zPI-iODZ82SYOJEw{GjT8g>AJ(dQo=PxIbnU3q3C#)-%60+oIn7)%G8`F(c-<99sBoSUoE$W6i2yUg-(kO%GE_m2#qIJwO z7`!IzX_1Wy)QzN<+tpqy^@cn1A5^c$tF>o&m0so8V&)=J+LYJQ!dF;?8DO8uoGw4+ zX?fgA@HjtZPj!g?GR<<1n5BD%CnWXU-RxDO-RG(Gu 
zb~AjArqubR?I`_m+i7P%s@APFSyjv4Ou9wg6~f(Gf_s9yR;Ylwwe**%&Jwkh9~N;> zSj3&Ntnai$+#@31qBc$_GG_6&m@LODjHo>#Y*uD-6n7%)1igDImMLGar+8-ZVr;0$^|3&JT1l47tp6d-byo1B+ z@hT(c9xrYT7OP^SoZd}|ay?pM(S<^Gt_ermx zJ27v1*823x9QcV??UX+0^%ZrsoBd4;?L*(^PE-grgqgLzPkP1NiP`FpYEIM_nVE{^ zW&TZ#)d6hwR|M)BX3zE&`>JX;`5OYh^2R3n*Y#D^)dEtA$jn}_wBFxPURl>bk|T4b zsR2twKG-4qOR};f;5LafvrLH6outp4Wqu9o#>p>{y^a(j+6^T|%b+!-_Wl;Vrj?r7 zzjyLZ4WK6|C(t8@G8L@L6=TWo$%iDGMb%U9aM6Qg7(j0}(U0)f*zo9mCHk2oV03_Y zmkp0z4eBc1OE$dG4)BN-Q@*hd@aA+AkKXJw^PB7-KYEqO#G?mX>DLu~(nFZt!5irc zUGea#&--qt^%N*A=_I%ZBHq{=L*q zyrkr?e6t#Gk!n!*e2EQc74pC;82>;W^1K z&`rEYYma{3ZFq(Qyuri5^8L`n>%w|Rkqytuy#5v&o)f)1X~T1(myc|CPV_Qucvvq^ z{CuSi&&fRB9vhw$y&SROInhi15n=h9=%v7h=Y-FjY&1zkmD%u|v=2LMcuw?r(1y3!fnGkd;W^Pu&Y14$rP_w)L@y88 z@SN!7O&i{72YMNt9+uC^ymyfe&xt;_bQAAM8{Q-b@_l5(b24t6Ha4u6$ISe?&@ZU8 z;q^58gI&PeW5aWz&m%TGCwl2WE-arDy%gB+oap5y8=e!r?6={W`-8gDuAH*rEilVZ z!VcTN@nOCE#>DGF`K__xQto?fc>goF>v%_OcsDvIhyGK#CtraL&q=x8 zWW#e(?)zPCru5@=R_}aY2oHasVOckuMEd`|Xz7uoRMH~nrG+V?Hp#Cy_)S8C?h zh5GQ34e#p?@TScO>qWhU*2R8nr48?I4)WV$!*im~BR0GT9pu+PBP^dwHbQiz{0eM% zS3Bs}-DJc2P!0}sCBOaM#5-lfyWBy36EeekaZ+#B*zjmAo_<}C?=BnOoeuo`OEx?w z`)!i4!t%Z4pxo!!@SOAu0yey-9oXk1HoRgwZPG=3zHP&^=ApabmquoX_41)v4qd=2 z>jvJ=)u8hh#<rIlo~mIlsTI;Qaoy zg7a%%!TCM7g7f><3eGREg7dq41?M+h;{8`}ey6VF{0?8q`8{$a=Xc|kK$Z)J?@FLa zgOPhBFwO;I^p!xi3&samaJpNrV7|()07?KO{R*HRF#ft6fCgj#a;9_3a^MGmv3fZ$ z-UTCbInx=n9C#lvdM^h)1dKP!fc=2+oidJooDewotFiL?2O1BKC)nNQ&8Sn&Pe0Lf1anCYfq6@|i%Q(LhNjGU3;~&4A>ECrZ z%lF5nz)lUu;iW8RtHc*C1)c_s)TK=K#1hui6H9OQq^e-23`UeX+ z{k4Uhenuguf8N9CH+VSxBoC+mWD%!-VG*a_vWU~CE#mY~F68t#F68t}7IOL#3pxE8 zZcbn0=JW&Ioc?eDr~hsNr>`&I^wSGC{gVqg{n`bbe#`<+|KX*a{+UZT{mqwh`hJ&k z`iJIoxi!uQwrMcd&*$=*H=oOIkc6)Jz&A7)@67{_0!Gz5V3GzSXCC8Eo(KFCFowzZ z0Fi1bQb0_omujIQa<~U z9`gO|JoY0m*?uS*3&iAey&bLOwH90KbtQ?l_jciW;gKU=Xo7pVi z7Wv+o&2*M$Go9%Yj>={_XR?^ieOXNBnk=SMmc?}3@;xVu^)yhvf0)U7I+DqH8kNa< z`CA6-<*5wT$Ja7gKh+tmmw6eimnj*nm(&c_%cnC~FTb9_eC(RR`q?mp^|D01UowN` z88d_BIWwKZ_olNv&rN4}Hce+a)=XzPil+nh8jKYA{*l4_y<@OHeZ~N8(_ma}uspX+ 
z<$SzTx&Gx#c%gjnEAh83WO(g`z>9#9aUpPr2IDVNfL6fx-4v$t(n+*I2Fdm!Ce&FuOz+D=Qm6L(}8jQCu;P0&$a6UIleBlM`2l~qQ ze@+7K)?l2P#NU5EiTQYH67zevgj*-EKe>7maHj@i#3a_+nThN#PE2IHUrc1Y2PFLZ zM8;b=k?BvH$o?or;(waJ-}g^o|MRU0OmByLzfr!glkcSyI3HsI>*?%x4*xoy^>lPR zFoF6bVVHng4a8 z8E@)n#v3}C@y?E7yq6_>cogG(Z4~1*jbglijpX$2j^y-@jO6;bQ^G9*zgpl$5?(x# z>t8=f_m>guFMc)*>Gm`_OE^%ze=wNwUzG4?gPG2^Bp#5^FW=n~A1z^ueE-`ZrqeFre+>fWx?o&4 zh|6)+Am9=g4A&swd|=eu=W)G#O2WG(tdr0q;aCYj9mwgwI}o@7FzN;Z7rS6o4g`h* zqgKlI%LX$469zIrAE$EoZYtNuAEg3!0>-VWTpu^40_y=|O)9VjFcu1YRw~mODEPE@ zpY;4%3e)*k3UD=G)TS_<hCd|X z^?lf%uI3E%I<_^Rk zVj`{QXfRGEaliW6B<@e#n8f{u(j@Lj4^QI$!tZ);zW4Xw_VQ~Ir%SJ>otx2v>uvuY z%;(!KPXDBY54%{7T`q>Nc5(U(T)<=(j4>{C#&%R+7t2BWZ)rT;q_N+;SYv-TPQsxA z{|pTOgM^2{e)HSl^j`t{&1$gUr2W7&-d-l(v*i0wu;08EK&}f$1%QiOm`myU0HYQI zxWEO2*5{LfQG)=u0HeAbbT5Q4yPVs#@#TO^gE6EW*r>rcvXKn;HxseRsjQ zauqNM7EB$&>0e&PcJkOV=H~&L|IlFEy^QUoR?;n!bWfU7kauNHECt+YU=!ML>$=%vA^ zEo8c53V~%Bj2Aq>j{)OR53omrvB#sX&@*;Oc$0_qRp4PgonFND^u{8#r-O^wo}OC7 z{5`aY?c}SAnC}t^FI@y2)L@KR#Qdf%Vt%oR%kyUofhVY5F9crFV2ob~9MfR@(G4uo zU~G3Ye*ri1S1I8LH{c~dQ^0=o;R3EVI|^8@*B3DVtpExV6O&a&H~1tCgDg4e|ahJQ}Uyi0*5si*_X0C{B1sP8Tr}yjK6q3^Y`{V zV4e%cSLOk0G#D4m1D@7k{P!im3Ju0>moR*q%o9~z%15|~flDLR9p|3ysq`iq#ZaSqsV#vBQ!%>q6GjL&AW{kF|y`~Ch*x&s@=S7tK*6%t-D6L^8< zfpeMPqq)rQv$-tSk8@eBhjM|18jNq|GXJ;d0-pm$CCw|jV5H`9KA+@pJ$XBa>&fqP zSZ^=na6S264(DGl;kq2o|B@WeFC&NZOV0sjlf7gE2Q(PFvVoUrJs=x&|C4bx3n_BjFtvvVY9K5cmdQd_INe30{}*(J8thJbTWK&p znauHbCUg9CI*86<$`g19Plz={AL``1{l8>2kg^e{O35}1;Dst9FHq%#sPyh z7^@_{NWz)pfFu`;zl>!%ha}uTmgzk#;oW1I&TSH2HWqjeF#eg&`5aFNT;#XXfhPf@ zB^`KBgHb_y$N^)4gyYhgjxO=`G0gAx#xR{b$1wa>35_xAmtGqU{6mBBt|EQNV{9jEhDAKhR*jKa$(ELo~lb^KdfHQ!C-xk!*+V zk!*)EN3tCb8VP(#{$d31BMrv<5zNo)!zeUNaageTAA_T%Vz>`(8M_+kn3Bs@7#(n%PSut~zzsoan7NchhbjvtpWn8JKd zmGDEIjwZ20!GRJV7Lp$&-!yeX`h6}{kdOr zx*vz%mvFI!fA0(Iqkf=-%Ot$B4=`GTQP&694H#|-^*+EJz&M@EVS6%gA7Cu%&Hc+? 
z^pp{-;kbAdUP?PmVv|ljGl%a9dBNb45>Jj0;A# zr0drc7~;a*za%l<&yzU5OTz6*j8~lmtkPg)Cb6HLB;Wtp1Ly^edbI~I+67~452kZf z4`7%Jb1#wj`4a!7i@&$Im@dsz^aMuvT|l}E#xNJ-f1m;9yD;}PjpGkX+$Uj~#`w7c zAD{t4U6@PzhiLu$5I9b+PSf{B07(E1+t33T)usV`UC<9`zRVE%Ul80dn& zQ}DL|s0MV}_Zk3nIv-dM==D(Zkorad{an!LzRFbM8$gB&x(7g>3;H|>^8lWq{2s-UAOD6f$unBrqHc$w7DVz%ED#2hcOKfdzn< z!V3XCUE&m83h19?0ha(?3a0@2afwq%UHNCTfVTiIg_8ljRpJ!>ndHv`uG8S9aI*$| zUKY@(!AoI4gFY+^*r35n;RS%6EO83gYtRp80{;zoDf}JTLnbg6@KQJj(67w|{s?#} zoCxS^B~IZVNN<_I^%}esHfhkcOu($pt{M1}}w08uYXafbVMXQuu%d{n#YnEe&1@ z|4jBW30O|{GD(e}^;;(aZvb8j2a}(bIE6<6y>JrHLiRHW7)16XaSC^6&<9QewrcQF zcpjkBBrL@#+(Py=5jaHlG*OLf^_>%epO8IGRO48E#YEs0vZsk^JgcWq1l}QgnyALL z`tb?C}i4;=ju%pfO4{@32J<+Up@i2S%a6tfz*ykoWh&PUrqpK zk-wY(x-(l(mNKP67#LJj)qall1nkF@?x_Bams55P;|Uh`VSp;uLzwUekg3WUuLJT&cIE1K%P4n6Ac``nGi78o*28 z)#U%uf!hHug}0HvO9y5EUJCn=e;Wf#2fP%z0Da#W;B~TBTDK?vHU@Z}{M#5c9@X>4 z0ROGQOX2UxpN#=NAp0Am#;1DwXy999f1}knRo^)p_zl_LXf>~)uNVzHOZGQfjbHWj z(ZCyIf1}knRzE%p_#4^ZC^eqdTSo!+lKqWR<6528@81Kw6uwLLHVXI)`Lj`K9Y9YW z1yqnf8>Pm-`r(nl<78hW)xHva_eh|(3tkF)QNL^?(8C2Ug)Zu^j0FCZ{Mkq~F4a$u z06qb{6rQ2>mEz>jC{F%t1n@EWvk_|Cse48MKO}!PLXAK5)DeK@f|o+LpdTF$oF#uY zT#ZZhy~BaWGCC1uun3)bBhW z_&(Xg`D$FOuQ(t09@)eBYW%CGpAY;K@KX3G_1}g9A+m>|YMoMV9SXR~9)_y%rrtCZ zm`na>s2X?b^M(R9l06Jn>x+8wP#~A=VW=91>W7B_2HC?9H6GP>4*?z{e=|hwW75|S z0R~e0Hbl)w>XU~61IQkRsCh~K^kCqJWDkSY_*FkJ82Bamlfi0Vl-@iT_zU2r@D%xr z!N8+{m%<-W|7kGr3gD&iW$Hf-0uGVC7^LO{^u2?C=gD6TQu8MI#zDZ}slPNxjd%5o zLBJ4dzXqxCx_;(7;92S~ou}qu^tSVWL1Yi-srg=g+j+px$se4j=IQm4^MGHGKR8d# z$Lne50UwY*I8Tj_^VFRaPEh#|P~(05zyRQJ zD*pj$KeXOF0H`5>HwgQ+KT~dJxZtj8~=yOpXUqoz5M}= z{6T*;AksJX2Og&K@2~a&=o$Tio>cz*)q0bDrXTQqD*t|JK0$Bm2lS@$@2A$E^lkls ze^L4OQ}bPVNk1T){6Rl8Z>6X817=YFrJtHV)sOWBMo|6ltLAC+eSLxRsr>t@`7XV_ zFK{p5rEoX(Px=C5sQ&j=^A-$$+c>nr*I z-vYc8-bdrPKEU6o{`XP)1oh*|z%DBPWVLTdZ%qb%M&+NZ_W$Zl$v{6U|75ivO`n$x zd<=Lg{0H?nl7ZW){wJ$_Y5L*bz)2dP^;Y{S^xeII|Dp2lt=28|wY`DYsr-AZeGvNO z-arqk|2)s4pY8=zQTg{$`+D>Py?|S({ClbWIeK$1;44)Bd+~gLrx$P|m47du7f9^| 
z+)VYq7taqI?Fn2@<=<27r_uNJ1bkHgd-8n1#-6}xD*v7`FVGXXirRyoJU?(I30O(x zpQQHD=xs^BJyiZlYG0(jEeTjn?E%ln=p{+OR;vF=YM+#zmIMT;{wK*iLJ#2URR4SM ze8RpSKnvidkmeQYdjMaf@~8bJRQ^4H5Y_)4YQL0DlO;98dJmpgXmGpaSqxNb?Ic@8YNO*La?R_Mwzhe}LzA z)I7>{RQ@tAldJ((QvH{C7Md5?K;;j$|47Y;jG+Di)I6R}`%nxjf0>Vkdd#~nmH4jR>P2#_ycrT!po&f{!ro^9; z_}dbHMdGIM&hR>enR3Ia>3@mB)(hX z1FvCyoe}wmNt_x}`i-SHy+0!9b0xk<4sI1n{ItZcl6c8F#=lnL2PD2#;%6j&uf+3c z0}_BAO1w$p&q};a4)*>=;yLnw$_a^|k@&wT-V3M`{(I6SoC|nzJ<}g1@w^Qjr;SpS zeygO<6?ijE&;clvcu^I{uafvaiC-)6BNE>#@f@+odnw)zSV0v7z;`9yD)B=S&!CA% z0RNQuMv4C@3rOkNjJ@<|5P;=59N#GA^{ZTt@09q|nH=9O@rNaSy~K|Qe4oU(3jQ{U z&y{$y#Q!e%O%mTB_6M1n`z0Na+V0ILq<-G8le94g}w|f#JtBu1|Ch zAVmZIAs3{6zLDX1Qr~Wz&TzV0ihkb^`W~S_iS(lZtyeO9d==x@3;vE73_qI5@VnPD z{4h=Y0(epQucdJWfbl|q%)})i!@OS;KxEgS>&T!2yXte zXL0^bQvO3lzV&jk>~hKfsN_%cV}yUxD5fw+;CluB8^QmBT!7U}@;@#5*(Chc%Ef3m z2zHB6uOA*A$c#)?-te!2(4@2=zcEBOrf$OS`LNeoY;iEWzGpUvR#HhL^m?>3=Nztrh>6Vt;#&F#nCCS-(%p zf$5UvTt25iVEA&uPZfN(;BOTCTSUJviGCjz{caQe{z>3h*D}4o3;c}0pAq;3fgcul zo4|WW{;dKZF8OCj{&aqW?8CK->2DJLCx0mUjbZq^_j3Hy6)fNJ|Ka!#`Z2thTwwX| ze1^Bu24et6dow(bpAW=$=Q6zQcMPu;_=}r3?h^REH#mN;z&{lDB7vs~{u@$$4@mj# zlk!Uy{bvgQBky5)bA|t8;eTa+=I^({fAyuzA1!*&?;VkEqrgvye8)w;G{N5~@Nt5_ z;*X3!un*JEpoL2SA6(4zJ#TXSHi7RH_{K{Zen8-F4P^Y8w{ZH4Jq(}s4#(SQ9G}#T zyKm^T=4%Q`RB-ml5a@V4vycs znBgVrbynatp?~!a3{NUxc!pdMJu`*z-D?V`5u$<`;(MkvXsx2lK*Lu?@q~o zujJn-`Og#lgOdMR$$z-C-__$;zDuP2UMuZ)+9d8D+#vTvq+Lbw1E*<08^AQ_ACyS{ zsaE8|8AjNY|<|MyL1a;_|G{GadqscW z6#b>sh5;HshpO>ZlZjt;i zll+fM`L#&>SSR-Vn$({IQa^4M{y!6Zm*l@w_&=4#`uRM_^89Kk>u266j$c2B;Wx+y zU$k$V{8RmV4F5qg!+$I7#}@IQNB^7Q6Q%yQihsFG{7Z@W7n7ERN9~1nUlK&*}Pp?S+wAe|%9}53R zh5!47|0dyow&b7mO-`R8@;6KVzrBFv-}wl`U-NSQO`mf7vk45Z|1QVhU(N8nA9H;E zg$!TY%JG%u46mnyegLMAWq30k@T2$&hUd}16~LdS3H|@W@qU#IZ~GU=drSW54{-dH zYQLLtM%wR>CNqBOj~Gsy97%sW|H<)X)W1k-M#&F2K3DZGnlbqkj{ii)C$%y@`i6{8 zj>`Dt&w`)+FsFY_@OKOTRieK(ftQK?l0|<9#6Da1F#gZPKHJ4UpBT>kt(Ersq05>7 z71Dk`P5q~&X1w$h#(!D$KbkR5`WOGA{zpA>qP(mkPD^nuVDB#(f@x)|N6M}ukV)r^&lGrxz$XfPt-#-r{7*^#4@mxd 
zCI6pKVg6i?GC!|N`Ozk9`mLh=5sfc?$?>aH|EL)|MZO;i{HVb93%p(6d7_`uqMvD^ zpC-}Y#p2&vrGD2-{n{z@o9;27{F`V4DD9z?^4lrpcfa5dy_?}_QvVK%e<8O^`0MsF z{8O>-44MC0E$#1f(*FNm`adhA{hdVp*Q90){SM>jiv5>}{ogC?@B89E4odsmChhOz z0?&~C*INQVBkf*M-%y69OaJhfQa_JM{rs!+ADgBB{DSl! zcb{Z_a;1K^$@utP86UUF`1lUNr^Q72Ef@UL(*Mj5{nU&9oFMu+EdF(&_{VuW7(ZM5 zW4idqT~nF=wc@{?SSRHt{>wX#;muT0X^n3s!;eY(bDOju_3{AGHXp;&rTw^D?6XAd zvqkLFBlh{Iv=4`+efhSu4{L@0w@mvJ`+7(0Z=cxTUrqay3o_|~O)B3uvA?q-U#rMB zUi@>3$oHhkcUa_W7x~hpe(o3f+C;vmM7|Xw-*Y10UXkxfk*`_g8!GvyOa3{c-(zC` zC&m8fN&X**{qGa~UL^TvNd6Z~{>R1smrMSsl7FG(pDOvMiGJ3K{%GSp>F1c}Z-mUh z9TWY{kp9&f(citI|Ie(1)uJBQT0Rce=7913I1rY?`E;@m&CreiGBNNeIuzE zsp8+iA^!c0l+Vw^e;gM7@muj9`$T_Vk^0{z{Tn)uPx56*{inAni2iBuFW(XU){B0t zrTuJ`{?Colezr^briuQKiT_Cx{kMw#^F;q`;(w-y{*Oxj$EE(YO8#lN=-e_s{(BwixxrT0jDr@${I`n`a1S${r3@ZQaM zU*eM`zE|R3k$9`bhZ278W_4(QeoqMeu)xo!{!n%N-(I)x(Y5j-d!vC)% zPMg{2cbBA37ykFsdPUD>tQ9z^iRxFntRK<2WwP(%k{>N2*_JJWaA|ejx@G=Njn#n#S?`4{U6xyN>v%y{@K2Nw*|3d%@Cre?xg?T?5Hp zvALwwClY05DG?X>1M?gF4N(aq>Dsgz2~b*z`esU0 zSskCrf((<3nI;$Rrhvbps;pJK&^k%CL5pez|3imiCE;X-4NJl!-;~Z3iun!m6H|+9?q>a z%SSFd%jb5xE3JGwp|ZGZWWu?a>7{X~YI4`G*qvll875I#T3%Qg!DJYgh`e*^W--}# zmyEm2^Wu(}wsDkQ>%Y!dUR_;R!9`;>t&q*u0@q76HpRuOK^@yZcYAVJcZn@Dcb$JjRqeURu)f}1)rkkm_SaT+Ei1DgS*o&o z2Ntcfewh==I?T=zEX<9WUcAUTlB=f1)ZiB?`5dYRF>0>$7j|MX?sdx+FDdoKv$Zl` zS%%N;E-helvay%IcjQ;p)ouzjG*$%MC8b%ua0fgwYNf5eKn~V&fE}S&PDY`GD>o6a zU>y;NbR{ZZ4R1mYIv{CNKs-khr>eU<73dJH3pHRtbPcGfD(r-4hjl75x3X&e zdVhnzw!-fVTwm`eTbu7QYnsh*i!nHCB;u=>$DWHL`sLIOU0i7nlsi*@9J=w9?!UG$ zUg9pOdVV}r$H})F3eNMDUK&lE9ArG|oqNR?M&s79c)hU&nS~{lZa4Ye3}0cHk4z=g z=kwQ=udDX^s@4~-*wj!O$jtEh${YN?P31NI!qR+SX`atL%jer%Uft+-m*)E1Gkt72 zZg-dZvhg^HUwEp~sC2Ftns*_gX88)sGJK`^KH;nrM--2-i0PZy>R@w4u|OxjC|=%{ zGm3G#rW=tCK&1PRV7W6?Zf?x!sV+4yOg}@Ks01iX*Gd%Cq(qSDMqRHbXHiYoRvI9S zt#914(YMiGUSHT*8ON())s+#R$h}$hH%jOG$})XrteVn5X132)UU_xnrofk+QGQMN zHU94B5hF@zKJ}hd&FsYfLnF5?Tw-~?0>+3=od`@5sY0)uYBc+qaN8SQ9ddoeK7T{`Ccm#d;G<4| zNo-#t3MaRs+FvdMEG}%jRGn$gBxcz;iC#|syXi-~=Xa)ybCO522EQ0l;$L{T>D^vm 
z6ZF6HYRa2@_W7EyW=vIy>xxPltNM~k#X@Fh>a)^CtDvOPq7W7$HlNNk6{m7=!OrT8 zvoAn7T9)pRV(Dj*Jz7=AUW8rjlGjw(TyBQXS6^4(eJ2&`aH)PMM^(nX{?b6+7s{an zH&`0T-&EJ|1>AUtv@>Vbmp25e%By`UlZv|f>$_EKr89kz`IKe&%BWEiO@#+XKHvJP z>gvLxb(wB=j{4K`N92E~0_XacEtW>0G!T`>@~u{q2=j=l;g!)fc1dX9V8JWzF>Y8f&X+t4OU?H~1@+z`oju2B$^@F|&x< z@?>SiqhsTgm0{B0>X((1DEkP+C|s&;SyoGxNJW)3mF{>VTdJW1WkfrQrw&y@NZg50 z+IFOhDBR9fP+7wc)?_eB@T$5^YF;#_u(m27lPbQ-O?AGF<+YX7)Fbd!G?n|x1OBEe zSzZl9B3aWaaTgiF(5U4E3t;Q@#29mCMx|e-p8f8UN}h~Q)ThnN_7(daq_<{(!mXc- zqAlJMtZP~D#Jo#88YMw#mM?(}QSEMwETx%U#I&SJ%v+^9&r)hl$znC!FAY#jqn4n3 z>&h#6Qd>c4uS>wPIa`(KD>moA{jy$?sE8J1xoeg!?r7Xmvutr?I7MO^O#Q}65}SFn zZo9&86pk=%4aCd=Q*^RSC10j(O0zUD)5pyWwE(4oteFkvwUu=>zVeC+|E5jWnwKw7 zzQLWN+M`OhyQIulLYoVynW0(w8h=eq-Dc^aQ~OQ{*}>8XTw0$Ta?a!uNC(-Hh}YIs z?nUb|-0nQ{d#CMQyg{#4 z+FU}jvnGAZ6PS$0C0rV(^JLYu7>ushn;2ArV4bG-xiJWg2;ep_zLgD4pbORfu!cB{KQ81c}gQiP9=d?KYR` zOc&glaB*nc^;traGih5%#g2Y02Y!z8>w1+q#~F80mCiwKQJRgcf(b0PyB#ajgxzY^ zn>tsr)|jZv?yP6iv3+tMhfJ2Lz1dk<=qq)Lmb=)IkIltysodY8PpU7)HfwdIqBe4y z`>Z3j3|a&1h_9I$3uqOQ-hfE3ew*cQ-WDHK;8#Jfp!o+WbC$Q-!~t zcBPV?l@-!1C6&P1jvblFtVMp+y)ZZ9L~xMLI~~{g8{AcKyRW=?F1~oy$UTx~REA7} zd(rGvBa>%l5D$Tkq@|cHbXDxGXsZC2F!!yPaD-?H0d`g*Fe0|fB&tSgqNA#IlfNNA z9i%R|0M^dG(m+;*IdM^~wy^sARMI7jD>`m+^3|#&^|ZpoqI8*8P`9wEI^b_`FDv!M z6lhgtrOF{f=$TzWh%B(KU?f&wj<1-fTUm!))*ns7Sfh&sE^g+RL9@ZH7M^H6SZOa( z8EWM?p83t1Us=f}&33AAt-4SW7iU4Heam}krcdo*&YVSCr0Z(^wSl7Yb^hwA+6}hz z0HqnejmzSyoBAEG<-H&)ULK3CShZwU#pdRv0_VRqm(0vBB@#T-6Y0ELSV(Wmhawot>tp@^w|4GiidQqPl$3CbhT2S5>>d zPH|UO8cUBdi8@h0D2Qy;kAS6+KPwb}F~Gne*I z(icfbMamdmqFS=J;v7+`I!AQYpCdXn7CsjmQk&vb?eUdYZ>Vdi3T&+L1@c!nqW${Q z{mKKX^JVgP^yw}+VrD@jj@DSGf-G=s?Tbwld8Ew7M|9rQEwU&_Vo}%50>yYD>cb?4 zMv#rc?U-`-^i5*=F%l>Ad3HV%CbF}eIFU^s2@}OKpD<~x4DnBN*(K=cV3(<_h&6IM za-?A;hS;?ciDOqmBqUbuICdJ5Wvm%T44@YJJoWOT0fvHh-Jy zj_gcD9lWv<*;du!Ez`sf?xN0Kb(K(@VPXPGaXVbJ070jjx|&^_EkJZq%tG%{CY|Ue zP9FAx3TGk9FHZQ6x0+-Zz{XnCLb;R}tx{UzMjn*1K6g%phM}g;Y((r8!v5Kox^vn* zB+T~3p>M7W#RJKKE(;XBsnxXDxchA*81D=-{xJzNAwK65(&ruE*RyV;xU551?995@B@N$w(H_ 
zorG=@w&^#XLyF%tM(WC{p2AsVEr|g;thI4Cu-DbjDv#+hEGF%qro+0^A!Zkj2raOz zB&_2v?c|tgUN-+MrQy60im!3Yf)gAevp5J#+f|E7SPWm}AeqTem>p7n9ND^imMk`d zFLInLmRelx#p_O<^AlwzFP#--JZ2J>4PMs%QctLbbNccRsabh_Ip@^6SfAo3-PVnbb6U+}n^a^!+GVz_>YPN( zfi(*);hD7#QqTlL#IZF~xNr|3x`xF)xz;I~d3ep@C0fYN+>^tH#2>A4!&6&SqHqthp6Lg5oh15 zq#d92vg0Q{^A#rM>;zmG?~5FQ6Dl2OEcP6nm9wR(n4@r3l8!YLb)qei<4vTQDD-fq z9cU!MnK)Aqao1|>)5y_lHFlV7q!HC&%;(sB^_f9(yzKI#qfXnUB(>b@(luk10fS%82uCw&Z*kqASNM5*&dOT72D& zwZ%E+@LM=b>S(ESU+(!gi?1&3oQ>Dzvd-_Rjq(=7SPy2&ZMTqk{iiNulgQ70SqI_5 zV*Ot@3l}y6(@>@Qc6ZlyrCN;+ABRhr0Z&h4S_z_77uvljOHqp%;3yx33S?osIpt&a?`cPb$gIFsh z#X7qxz~tT%r{i;R7O!1XZ5;CU14q$E<1Ff5qze@*QAb%isx6&%-<;(g7Ps47tIjiw ze?~5BLd|N3OoT}7b$aa0M2je}PR_BJ zIFVU(5h<~{zS#LUlc|LCyLSYxbF#KmaAx92#mGT86BwcC=(BKf6U82Wi=Qm!{2C=& zv|xdpi}eNQvf)hle2a>fUOK)P2LXmI!ra*%PnG$A6m>tVPW-jGX8Ys&1`Ks#Zzz&_%4OFc6WyU7X&5H9a z4$<6Manj|*ew!I5kC}1un3)>~cV_IjSvhgO#lf8w2RAQQl$0Z39K5_(ysV5^S+g=T zV))3)jFmMjGgj8D%vgP9Wyb0*D=SVOS#k2nijzlHw#XPqo~&3MW@XKk^l@a(%8!*b zJ5HY2apcO5lUH^w+#cWhhVmLeoww?^en{nWR7DtvO`QJ5_{Ffh^z^I@f@Wr9%*@G2PtVGlm6fBCuk9Oi=yke0{tMuhPyh0HZ$RrI@7Pgz znEp%W_W*|h*hujNzdq9#a9YwNOT0Ir{`D0+pD*BVk~iz==kt}hSCk3R_Pb#KM~Ge$ zfHO0IN%ScR@?tOle7+486*fl|P46b5^R}d~9tgPP?|46-uT;4&x(kuu(e&2pjCG?R zmz>A^P0j>*(YF}>NZyLN@@ii#5sapHOz5QzWP*o<-il1106@POdX?pYayz|(`DOEk z8Pd}j{<}mz9Ul)>mgcXY`G@>y@}+iDxIm)$TPx*1Ptuo60D79paehAEx=r%rwEFp? 
z_%rRo{x+fAtnAJ9>#s@PU+g2_&7aTb-^dPtzeMY=P3U<<@vTCyIUAs-Gy4G>O~Kpua+xOOFUQ>gsc7-U%8!v zdFG$Qs7YXQ@Ri^T>Y-Op(7iL{-WlAQeD>_wko&IS*8XSDp5?@ogIjZ~$h6?r3@aj> zO-p(7Sx-fq=O^TVRMzw%c*qlMnf&?Lvy#Eq^h9{ULuQhuL`g!5lA;@GQG|l8cud}v zNKw3mS`;mNDrO+KHBDv0c?GxDpFMl_(AH^Z&z_|!b;!Mi1UPYbFI8BlsVqXpTW&w0 zc|yxihnB5PdGtk3#VoPrIb+OmL9@$pbh zS}I&Kg~iwLD88uJ?THj$(TT_)4!PB<$0zRWNzy7twqtwodwK1F^A!QIZI36UvfYk3 zXuQZ1V*Q27TB$;GO3L*7#Pm%JqHiZ|O#(vhEw`V+Eth#hN^`L}jrD{S?<%Lkl2Zs& zi+a7_zF#|;^n&|-Yvu979Z%>kPSK9=Q0oxUL4pMft`JWwj@yN=TS#oZ|^8hC?mNxY@EvBPDDL6>qWVdV=n~A@|^{h{p96KWx~ z1~t!tJW*-5s<`)=iHi4mLc0jv6MBkjuP69a`sWn95`2Lwi&+CtlK0u?32seQX*rOp zx4rnilqY@>+C|mC6KXu|3Eh0WD5RJw3hko$RuuX+SE#4b6!DKep~va3;I6@+pFLX? zd~RFIAoa(q#lfQmA@$WAeDzS0YG(?z?;@x><%wSx1j{BQdx8ZMQn}KH+($hvEs7`d z7_4rV?e;b;s5Eh+zY7R${fP2&AM}K_o>4rw53oeyK0@vTo}dDS+y{dy^@(eH0NYhL z%t6YQJ#h=5HRNtJ3t&GNfZebfhlkwT$Z%85uk$_&Zkz?4tp5SvzSKHn1r99CVYN5Qz9eF~H$33B^ zD18xox1!K4Dy5>9Z!`8=MZwjnDNnQo7bG9*K{n(GK9wwVL%Zm&U`vuJwr7ijheKQG z@A<)JRc>T2Y$Po$OwB5ulkl``m2aGOfmu3-!==+6ornyO{3exN$bDFf=w%IHB2z@s z*NAR;AtK+V1>M`k*F}3OcEs#sqnsG~UH14x zhuk}Y?js@hk(4LeJhvXC-puN&o|m$aA`421oXkk@eN=+)eNU|Y?CjYSpZx3W*%Mi0 zU1kMk7S^YXDF9N&sPPE(r#!cxDk<33UKA=$^#pG|;|VsN_5_z7_XLZNtuF|=|FF&d z`=Vg+;Y02Nsx3~@fb}8wA5xxa3l$&G+JnzN`^lJRPmF5mpM3kD&P-|dgo+QZ4{Fr4 z3Kbvrq&(Bd(=voGL;YWDNlFe<1wUniLPakosgL_Z?$)hsO%p=yR!^w-jOt(AJT&Cq zfBPBSq=(%5`Rla&_M3X1xSgs_$bH5WDsHv8scp}0U;g{NR|C_y#}@^Q(@H#}T8U?{ z-Tllq_tV?mPtQR>C->uS3%U0P-3L!Dqind(*Xju^Kc;-fZ^Z$G+y|5q9H2kW^iH|+ z9i?se;q`|=CUTr*4JzR*njv>v(A`cx(fyLDfwvzlx$61I^5BAKQ6m@6Lhz4cfU3Vf#mgv z-1qm!$zBxpz;^eqQ=VvBA1b~-<(Xf2Ld(yDn;-YlqL$)4p5V8OAmqN&o$^du$`frx!Q$rm!Dl@oce5v$G(q_hPf#gHwX9DD-4BM`M?9g%Goj6s zxmj}`3qF1mdCA}l1);6^Wy{rUe|xCvgo@ie z!B@zma=$r(_TC~zTX|qboBoQRP0hCZ!Jzw*|F6BP500z4?!T3GrPbHdVjJuR49*9| z1Rjx$KV-*X+48fWuw@j>HZC8#tfjRjS6V5nm61IVM1;YQWm^rw8BbwS&){j@DGj7! 
zsLeo-V#hcck~)+LZbE`eJRl=YqqH=r85eiXz30BYyV6>_*!eG-vG3k@?>YB;+^=)q zedi{u!$McjJJykNKNABq^??tSl{7(IBq94zi20V&Q!ks(05i> zzHGfk(yDjfZ*WST*WzL$@P_TY)GB`c)J`PnS7I-ftakRcT%ehLaCIU{zm(htMPE5A z`pWZZyd9Rj(G)3pqhBfBs9#mSOxjXAriAsPkYwfF&x`r&h?M5unId;E(QtU_smi@R zkvo)Vc%E{fP7yMTX~(y}+p2ZN5|l+z;S? zsN~9G;06Muzvab1C4h<&z_YFxm{2eei}DN4{8wKe3Wa_7=gJVKA7qpMwGSx#j{<-; zGJj8*fkKI{nEV*Cy)yF-4V^V!paH<^l=%m*^Z@+U2Y9~g1)fW_s(#7wbd<8k$ZB(? z=Z-s`F9Xg)=bfI3gmu1u!qc-NVNKjd^?Md&j#(bU$0X-8Yl*4QPR`KgEjhA96@gz( z!eq`m*07zhhP8BrOo983%W0hIyReuc)zQSHAEA+cQLHH;>rOgU+l+ z+MO1i$q*(B((QHJ+k_tb$EsXe(guP0 zXG;H?ZSPp*i#a2NJx7|p)X*8*@rdV+H9YE9zwG`KRhd0yQa72lAGw-f-TG<2U?)oK z8&BEJkUjLjE9{|jx7kUr-yWJM6b)qQrPT zBlaS$%rjsF7rgxy6}R6qRe!zjIFBE#U!Amu)y{7G3EG2xubi(7uz_`;n|IOdcmBzsX%u^vpurX*f;( zC9&V?AD*yp9FqG?Wog<8GUOUY?4A^`0r&EHvu&OICR+sX8a^p-)%XL;p!r zK$Q?O^=4hd<4Q*aeC3o6~dsnyq! zlh)}p-Pb(^t2dEs!wTaPe!jyo5g6n zFFjTHl-v-|L{Pb({F~N0Pgw6PN8Bf;kMspmdfnAtUap8K>fJR@pJ~)6eZ}1`rpZ)o z1?8kKCM!7yKQb2ZkT*%Q-UTYP4AuvT{73)YwVP}G_OS_GPH4dwi)FoE%s^>QE*DQ~ zq37z(N?O84#I8;`&1t`u1@kN8ZgW4glbgpyGr}WhJNyb^KY5v0l4SPUiI?O{iv8qb zA(TH%=AfVqyX1|gU)({(t0v)OlwNDuqT32fU(hO@W$DYG za;v#JN%0>6apZ z{L>2;MlrfPA>NKT)*+kSd5(3!PNq5G?1Xi|cG$6ZNWM-x_p1V-KSHC#Es0HWuKGr@ zVPGmI#26~ElbiL@QJkAEe9!a(!yz}8bTT^DVLM4)5}qLBy8!8HJi;X1AxWZ#T0LEu z`_ky(E~BKjPwP+QBDUy3ikn1I`a}0;biGlGE>%J#=e}&-I+}riocog86HM+w*K-U7 z|8@%glxW#?I!n;E8FaS-wUgFBI+e8nC0OBFoUjvDWUl67L!Ysl)0mP&hh{0tiN5IA zetyNI#$3`XXFq3UAjS9D6y0YsNIK*4eU^!Tl}7#}Cujy1K3pL%DL3-rlHx~u87lL6 zB+K)=vOHg%<#~0M=hvPaBVX`C3)Zph9CAgOv?`@7 zCGlnM&=tL&rt_3>phk7tWI*+tmHyGMh+@?dQ|1V*ZM0+$i&rVyOemH)wI2Sl|CA?) zQd+O5JzlrmKlMznOTX#gu1IsdGq!A5+EG}I=xg}5XHVn$jqvmaik~5KKM*J{_78Y~ zg^->twKmuB!43b1+`N}sF#?YjcyO++uw2KV_q;GWmu=u8v+<-*@t4KA1m+%*L$ zx$rTxoZ8!@!KouvxzPEf26syybe_=Q93BL@w0B;ETb4(A3ky?n3FiUVuEAZK2izeI zE}#DLt_JtjJldP(P4Slxo$EC?HLv8NhetKIHF?nass@)2ogZs(`Sk0XeJQyt&V$Yt z4UUf~<|LPAHMkq|Xzy(ePMsIcMGxggDgN@2=V}cuAO0TH;1=b<-w_SY%mZ#rgA;F! 
za-2WEM6ZN+9m{9@-K)XvWK%5{Jv^+?I z=cnhcC=q9s%~$=v6M*?oobD(F9q2dL76ZEh^9!6_Q4H(>%-1oQ%CdlB$u zz-%f4f`Iw154aXE|IPU=3i-H-HU*d5r<=11XxD; zEdjOz=9MKtJ76C715uK@U!0>iZ}5v#73MFCfh~X;F9x0j%*tY56JY*JkvN!d{$&xc z5in;I$?}(cz%78ehx=QT59k8St9`&Wzy`9u@`AphFwYbMHKfl1 zAP$IafCn(wqAocK}9LDNqdrDXjvG zno{w7L6Fj&G&D}0)muo1dP+g z;)qF*()ECGpcwcH5Tx`Lz_`B{xB>`L8UT#>#p0WYAf-OQ7%u{`7NoQQFpd@h-9V7i z7+~}j0bd7#ly(7zT?8}(K}xrgBcKSV2ZEH^fN{nLECGU)E(VN)K42{nq;w5nwEBQW zK#d6!s}Q)2^j|2(iBVGs+zJFKT|oLT1VW_$ zLNRX|rwV|%r2hgjpBn=O;=E6gQiJqg0926v3&gx{%qtL|p#&+NL;ClK?@of0UPb!% z05eJd9x?A5y&m8u(!WQ{*G8>JboL;nH@1AB(@g>b*)`hfF&YyiwA z=IF!~f48%M^2Kn!TUteV4?yFP{?<|65BEEyjg&6{&hmF>5z3ds{SK&;^K*DZ{u<{G z^Qo@iaekQdkC9%yeV}!N{$^AHm8E?Md{O2Pae08hAG?*yZ7zQc039?9@b^dMGYKA; zLxKXZfZ>PfAQ^yf8xnu(7i6B?5(KaQKB$oJ!z`b>8NQ0?qjietJIL@Mg7@}efXm0Z z{0x`VK9kBDx&1Xr_!`dBAxz@` zMW%lb(_h8%`5yE4D3@>H@(Jc|ph&{sK1*uoPNCmEG_robTO!N1(BVQlcXgH2&wO_L zeZ>0f<^FSs`_DMbcdn1&X+Y5_#49BHJT~yiM?m!d6_>A?AP#ApN?4){Rg9P`Um{7{bBw;70 z+lPaU{}kgNWc<%^d6zhD2?S?J_$qen%x3%n#{V9dALR0C8h_qC%wR|QADF)b%wLG* zJHYhc#PY3W`Oe_+(Zb)gZk;XLKh5wqm*39%f0+BnQSQInnZ5z0ubTNk!}LAJ`Wa;X zJkI)2r))oB{s)==7nr|RhF{M58=Nis^GxRND9g8v;pZ{@e1@N3`9>K&!1}itew^Xy z)Gx`ekMaMA>F?w6x468D%b(+No6G6^F2SE>{=U!r)$scAdoB;K{)f4|i_33R>jSTk zH}d?`$ny`ab=vZPj-vE@zb8FEoRO#ff)4*Je*dHXuFJr;DI@c!2w zn%ee6+S(B62)DO)(pS)-ZF?fIPQt!m!CGrmXqC18&bvZe>YM5}S*siCw_58Px3y96 zlIC5Foo&%LL5JubSxQpZaPM4FC(9StiAJ>2>YBBYxYZkv#G>K$y3T04vnOVWs3Fx0 zn`L%WEZjWlGH!*jacASUo!eR)`KC|CAR5RTyd^W*4b^UGZxpR&hoUPRb;7IGM&eEJ z&RBRyL=voS8)X{msx6nu>IHSX8X_HBUB^hLfc~z!oE5BKwGpWv@z1%(-8@E#zGq-@zz-90|X&1`K2G#i%6yQ4J+4L zA*suyq57`&`i}69h$39nTk6(pSlP7h;&m6SY-*}syCJlGI%FnyONI-qzi^v76}8gQR6d(&m^+Q(hw{K=oMhQ^m(sL6_XfT2oy0 zb?J!c@=l2*)sSkL!#(lUY@*h*(VM5f_f5cR5u@|wSB9;+RZ1=^rSh^P#ZKzhI=Tuy zldFqSs;ef8TD6NhRC`$>pk>z7#E=EIjKigv4J*psxV@Ru%z}nOkBO=piqu%8@N~If zVncZ7vVzr-?s%+ocjlEh*@`D!UnA*>*R*wZ3#}}qFeVfRj%N6l?#@taINH)q&4rqK z!=Z3I(ko&piNOD9K#G%$$*6k4%9fV8)^J<2uCpa#iMzye5P5Y~OGKR* 
zDWI*?d5+O+x#`Ol%9h*CA&xCKJUJL+&OvNtTNjOBE2~^2kf%GDr$@fc2*}A?)$~Lu za+nBB7HJW&hqEEzhO>p#)S8_-a4o^-FgnsB`7@bILLsq`hqi@VLSpRjY%|@MNe}QZ zNXR;)Us9++mQIx-v^^5t5pNAeVzJIx<2tdaq)A3xyBlf_$D3)k5RYQ+9z|p$(H6J- z?zLO)-ms*3*Y@^sG`e_w=Z+nbSWR2BJ06ZUN331e;&$2}gI+<*C(GJ0kH= zS7&=$^X~c$O;w?#k&dqT?oc?on?kqKlj=pZUeOb-M_469PFNe)HD4x7ZI=np_REBa zLRw!A32kT!RWA_=429cwbjI4^tsSBG<&yt~rqF^aiM=^O+v#o@&grr>u=6sBp{YA3 zM4=tMy`ip1th+NBZf}e44(*~)a+hS}ailr1|L5N+vyC6PMj+?egqzw(%czrP4{=#8 zHioL2qnW|gFJh1?gjsD7RUx}M!ZmWRNVsP9*_6yiRcZQd=*2F;)_2HxZ{{;DRlOu! zWWOkurZr(y#c7wQsjV0Vu8A&h-jyMvaAjTjCSBY*%}(^4B9sL|WwNDQ-?wMmb-p8LzGmh2pWENGQI$D|NwHRx^hQ zpUM!LWa)6-OYs~jIZZE&$>xiKowln_Wx;5`e-tBz@i*mXzNDzSOcqRvj6Yq;k#3l{ zlDTA?#>JG6S>pnIcFTrQ +//#include "cxtypes.h" //AO +#include // + +//! Type of chain codes +typedef unsigned char t_chainCode; +//! Type of list of chain codes +typedef CvSeq* t_chainCodeList; +//! Type of list of points +typedef CvSeq* t_PointList; + + +//! Max order of calculated moments +#define MAX_MOMENTS_ORDER 3 + + +//! Blob contour class (in crack code) +class CBlobContour +{ + friend class CBlob; + friend class CBlobProperties; //AO + +public: + //! Constructors + CBlobContour(); + CBlobContour(CvPoint startPoint, CvMemStorage *storage ); + //! Copy constructor + CBlobContour( CBlobContour *source ); + + ~CBlobContour(); + //! Assigment operator + CBlobContour& operator=( const CBlobContour &source ); + + //! Add chain code to contour + void AddChainCode(t_chainCode code); + + //! Return freeman chain coded contour + t_chainCodeList GetChainCode() + { + return m_contour; + } + + bool IsEmpty() + { + return m_contour == NULL || m_contour->total == 0; + } + + //! Return all contour points + t_chainCodeList GetContourPoints(); + +protected: + + CvPoint GetStartPoint() const + { + return m_startPoint; + } + + //! Clears chain code contour + void ResetChainCode(); + + + + //! Computes area from contour + double GetArea(); + //! 
Computes perimeter from contour + double GetPerimeter(); + //! Get contour moment (p,q up to MAX_CALCULATED_MOMENTS) + double GetMoment(int p, int q); + + //! Crack code list + t_chainCodeList m_contour; + +private: + //! Starting point of the contour + CvPoint m_startPoint; + //! All points from the contour + t_PointList m_contourPoints; + + + + //! Computed area from contour + double m_area; + //! Computed perimeter from contour + double m_perimeter; + //! Computed moments from contour + CvMoments m_moments; + + //! Pointer to storage + CvMemStorage *m_parentStorage; +}; + +#endif //!BLOBCONTOUR_H_INCLUDED + + diff --git a/CVBlob.framework/Versions/A/Headers/BlobLibraryConfiguration.h b/CVBlob.framework/Versions/A/Headers/BlobLibraryConfiguration.h new file mode 100644 index 0000000000..45b9410537 --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/BlobLibraryConfiguration.h @@ -0,0 +1,22 @@ +/************************************************************************ + BlobLibraryConfiguration.h + +FUNCIONALITAT: Configuració del comportament global de la llibreria +AUTOR: Inspecta S.L. +MODIFICACIONS (Modificació, Autor, Data): + +FUNCTIONALITY: Global configuration of the library +AUTHOR: Inspecta S.L. +MODIFICATIONS (Modification, Author, Date): + +**************************************************************************/ + +//! Indica si es volen fer servir les MatrixCV o no +//! Use/Not use the MatrixCV class +//#define MATRIXCV_ACTIU + +//! Uses/not use the blob object factory +//#define BLOB_OBJECT_FACTORY + +//! Show/not show blob access errors +//#define _SHOW_ERRORS //AO: Only works for WIN. 
diff --git a/CVBlob.framework/Versions/A/Headers/BlobOperators.h b/CVBlob.framework/Versions/A/Headers/BlobOperators.h new file mode 100644 index 0000000000..8be3c2823e --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/BlobOperators.h @@ -0,0 +1,754 @@ +#ifndef BLOB_OPERATORS_H_INCLUDED +#define BLOB_OPERATORS_H_INCLUDED + +#include "blob.h" + +/************************************************************************** + Definició de les classes per a fer operacions sobre els blobs + + Helper classes to perform operations on blobs +**************************************************************************/ + +//! Factor de conversió de graus a radians +#define DEGREE2RAD (CV_PI / 180.0) + + +//! Classe d'on derivarem totes les operacions sobre els blobs +//! Interface to derive all blob operations +class COperadorBlob +{ +public: + virtual ~COperadorBlob(){}; + + //! Aply operator to blob + virtual double operator()(CBlob &blob) = 0; + //! Get operator name + virtual const char *GetNom() = 0; + + operator COperadorBlob*() + { + return (COperadorBlob*)this; + } +}; + +typedef COperadorBlob funcio_calculBlob; + +#ifdef BLOB_OBJECT_FACTORY + /** + Funció per comparar dos identificadors dins de la fàbrica de COperadorBlobs + */ + struct functorComparacioIdOperador + { + bool operator()(const char* s1, const char* s2) const + { + return strcmp(s1, s2) < 0; + } + }; + + //! Definition of Object factory type for COperadorBlob objects + typedef ObjectFactory t_OperadorBlobFactory; + + //! Funció global per a registrar tots els operadors definits a blob.h + void RegistraTotsOperadors( t_OperadorBlobFactory &fabricaOperadorsBlob ); + +#endif + + +//! Classe per calcular l'etiqueta d'un blob +//! Class to get ID of a blob +class CBlobGetID : public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.GetID(); + } + const char *GetNom() + { + return "CBlobGetID"; + } +}; + + +//! Classe per calcular l'àrea d'un blob +//! 
Class to get the area of a blob +class CBlobGetArea : public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.Area(); + } + const char *GetNom() + { + return "CBlobGetArea"; + } +}; + +//! Classe per calcular el perimetre d'un blob +//! Class to get the perimeter of a blob +class CBlobGetPerimeter: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.Perimeter(); + } + const char *GetNom() + { + return "CBlobGetPerimeter"; + } +}; + +//! Classe que diu si un blob és extern o no +//! Class to get the extern flag of a blob +class CBlobGetExterior: public COperadorBlob +{ +public: + CBlobGetExterior() + { + m_mask = NULL; + m_xBorder = false; + m_yBorder = false; + } + CBlobGetExterior(IplImage *mask, bool xBorder = true, bool yBorder = true) + { + m_mask = mask; + m_xBorder = xBorder; + m_yBorder = yBorder; + } + double operator()(CBlob &blob) + { + return blob.Exterior(m_mask, m_xBorder, m_yBorder); + } + const char *GetNom() + { + return "CBlobGetExterior"; + } +private: + IplImage *m_mask; + bool m_xBorder, m_yBorder; +}; + +//! Classe per calcular la mitjana de nivells de gris d'un blob +//! Class to get the mean grey level of a blob +class CBlobGetMean: public COperadorBlob +{ +public: + CBlobGetMean() + { + m_image = NULL; + } + CBlobGetMean( IplImage *image ) + { + m_image = image; + }; + + double operator()(CBlob &blob) + { + return blob.Mean(m_image); + } + const char *GetNom() + { + return "CBlobGetMean"; + } +private: + + IplImage *m_image; +}; + +//! Classe per calcular la desviació estàndard dels nivells de gris d'un blob +//! 
Class to get the standard deviation of the grey level values of a blob +class CBlobGetStdDev: public COperadorBlob +{ +public: + CBlobGetStdDev() + { + m_image = NULL; + } + CBlobGetStdDev( IplImage *image ) + { + m_image = image; + }; + double operator()(CBlob &blob) + { + return blob.StdDev(m_image); + } + const char *GetNom() + { + return "CBlobGetStdDev"; + } +private: + + IplImage *m_image; + +}; + +//! Classe per calcular la compacitat d'un blob +//! Class to calculate the compactness of a blob +class CBlobGetCompactness: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetCompactness"; + } +}; + +//! Classe per calcular la longitud d'un blob +//! Class to calculate the length of a blob +class CBlobGetLength: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetLength"; + } +}; + +//! Classe per calcular l'amplada d'un blob +//! Class to calculate the breadth of a blob +class CBlobGetBreadth: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetBreadth"; + } +}; + +//! Classe per calcular la diferència en X del blob +class CBlobGetDiffX: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.GetBoundingBox().width; + } + const char *GetNom() + { + return "CBlobGetDiffX"; + } +}; + +//! Classe per calcular la diferència en X del blob +class CBlobGetDiffY: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.GetBoundingBox().height; + } + const char *GetNom() + { + return "CBlobGetDiffY"; + } +}; + +//! Classe per calcular el moment PQ del blob +//! Class to calculate the P,Q moment of a blob +class CBlobGetMoment: public COperadorBlob +{ +public: + //! Constructor estàndard + //! Standard constructor (gets the 00 moment) + CBlobGetMoment() + { + m_p = m_q = 0; + } + //! Constructor: indiquem el moment p,q a calcular + //! 
Constructor: gets the PQ moment + CBlobGetMoment( int p, int q ) + { + m_p = p; + m_q = q; + }; + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetMoment"; + } + +private: + //! moment que volem calcular + int m_p, m_q; +}; + +//! Classe per calcular el perimetre del poligon convex d'un blob +//! Class to calculate the convex hull perimeter of a blob +class CBlobGetHullPerimeter: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetHullPerimeter"; + } +}; + +//! Classe per calcular l'àrea del poligon convex d'un blob +//! Class to calculate the convex hull area of a blob +class CBlobGetHullArea: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetHullArea"; + } +}; + +//! Classe per calcular la x minima en la y minima +//! Class to calculate the minimum x on the minimum y +class CBlobGetMinXatMinY: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetMinXatMinY"; + } +}; + +//! Classe per calcular la y minima en la x maxima +//! Class to calculate the minimum y on the maximum x +class CBlobGetMinYatMaxX: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetMinYatMaxX"; + } +}; + +//! Classe per calcular la x maxima en la y maxima +//! Class to calculate the maximum x on the maximum y +class CBlobGetMaxXatMaxY: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetMaxXatMaxY"; + } +}; + +//! Classe per calcular la y maxima en la x minima +//! Class to calculate the maximum y on the minimum y +class CBlobGetMaxYatMinX: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetMaxYatMinX"; + } +}; + +//! Classe per a calcular la x mínima +//! 
Class to get the minimum x +class CBlobGetMinX: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MinX(); + } + const char *GetNom() + { + return "CBlobGetMinX"; + } +}; + +//! Classe per a calcular la x màxima +//! Class to get the maximum x +class CBlobGetMaxX: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MaxX(); + } + const char *GetNom() + { + return "CBlobGetMaxX"; + } +}; + +//! Classe per a calcular la y mínima +//! Class to get the minimum y +class CBlobGetMinY: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MinY(); + } + const char *GetNom() + { + return "CBlobGetMinY"; + } +}; + +//! Classe per a calcular la y màxima +//! Class to get the maximum y +class CBlobGetMaxY: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MaxY(); + } + const char *GetNom() + { + return "CBlobGetMaxY"; + } +}; + + +//! Classe per calcular l'elongacio d'un blob +//! Class to calculate the elongation of the blob +class CBlobGetElongation: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetElongation"; + } +}; + +//! Classe per calcular la rugositat d'un blob +//! Class to calculate the roughness of the blob +class CBlobGetRoughness: public COperadorBlob +{ +public: + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetRoughness"; + } +}; + +//! Classe per calcular la distància entre el centre del blob i un punt donat +//! Class to calculate the euclidean distance between the center of a blob and a given point +class CBlobGetDistanceFromPoint: public COperadorBlob +{ +public: + //! Standard constructor (distance to point 0,0) + CBlobGetDistanceFromPoint() + { + m_x = m_y = 0.0; + } + //! 
Constructor (distance to point x,y) + CBlobGetDistanceFromPoint( const double x, const double y ) + { + m_x = x; + m_y = y; + } + + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetDistanceFromPoint"; + } + +private: + // coordenades del punt on volem calcular la distància + double m_x, m_y; +}; + +//! Classe per calcular el nombre de pixels externs d'un blob +//! Class to get the number of extern pixels of a blob +class CBlobGetExternPerimeter: public COperadorBlob +{ +public: + CBlobGetExternPerimeter() + { + m_mask = NULL; + m_xBorder = false; + m_yBorder = false; + } + CBlobGetExternPerimeter( IplImage *mask, bool xBorder = true, bool yBorder = true ) + { + m_mask = mask; + m_xBorder = xBorder; + m_yBorder = yBorder; + } + double operator()(CBlob &blob) + { + return blob.ExternPerimeter(m_mask, m_xBorder, m_yBorder); + } + const char *GetNom() + { + return "CBlobGetExternPerimeter"; + } +private: + IplImage *m_mask; + bool m_xBorder, m_yBorder; +}; + +//! Classe per calcular el ratio entre el perimetre i nombre pixels externs +//! valors propers a 0 indiquen que la majoria del blob és intern +//! valors propers a 1 indiquen que la majoria del blob és extern +//! Class to calculate the ratio between the perimeter and the number of extern pixels +class CBlobGetExternPerimeterRatio: public COperadorBlob +{ +public: + CBlobGetExternPerimeterRatio() + { + m_mask = NULL; + m_xBorder = false; + m_yBorder = false; + } + CBlobGetExternPerimeterRatio( IplImage *mask, bool xBorder = true, bool yBorder = true ) + { + m_mask = mask; + m_xBorder = xBorder; + m_yBorder = yBorder; + } + double operator()(CBlob &blob) + { + if( blob.Perimeter() != 0 ) + return blob.ExternPerimeter(m_mask, m_xBorder, m_yBorder) / blob.Perimeter(); + else + return blob.ExternPerimeter(m_mask, m_xBorder, m_yBorder); + } + const char *GetNom() + { + return "CBlobGetExternPerimeterRatio"; + } +private: + IplImage *m_mask; + bool m_xBorder, m_yBorder; +}; + +//! 
Classe per calcular el ratio entre el perimetre convex i nombre pixels externs +//! valors propers a 0 indiquen que la majoria del blob és intern +//! valors propers a 1 indiquen que la majoria del blob és extern +//! Class to calculate the ratio between the perimeter and the number of extern pixels +class CBlobGetExternHullPerimeterRatio: public COperadorBlob +{ +public: + CBlobGetExternHullPerimeterRatio() + { + m_mask = NULL; + m_xBorder = false; + m_yBorder = false; + } + CBlobGetExternHullPerimeterRatio( IplImage *mask, bool xBorder = true, bool yBorder = true ) + { + m_mask = mask; + m_xBorder = xBorder; + m_yBorder = yBorder; + } + double operator()(CBlob &blob) + { + CBlobGetHullPerimeter getHullPerimeter; + double hullPerimeter; + + if( (hullPerimeter = getHullPerimeter( blob ) ) != 0 ) + return blob.ExternPerimeter(m_mask, m_xBorder, m_yBorder) / hullPerimeter; + else + return blob.ExternPerimeter(m_mask, m_xBorder, m_yBorder); + } + const char *GetNom() + { + return "CBlobGetExternHullPerimeterRatio"; + } +private: + IplImage *m_mask; + bool m_xBorder, m_yBorder; + +}; + +//! Classe per calcular el centre en el eix X d'un blob +//! Class to calculate the center in the X direction +class CBlobGetXCenter: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MinX() + (( blob.MaxX() - blob.MinX() ) / 2.0); + } + const char *GetNom() + { + return "CBlobGetXCenter"; + } +}; + +//! Classe per calcular el centre en el eix Y d'un blob +//! Class to calculate the center in the Y direction +class CBlobGetYCenter: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + return blob.MinY() + (( blob.MaxY() - blob.MinY() ) / 2.0); + } + const char *GetNom() + { + return "CBlobGetYCenter"; + } +}; + +//! Classe per calcular la longitud de l'eix major d'un blob +//! 
Class to calculate the length of the major axis of the ellipse that fits the blob edges +class CBlobGetMajorAxisLength: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + CvBox2D elipse = blob.GetEllipse(); + + return elipse.size.width; + } + const char *GetNom() + { + return "CBlobGetMajorAxisLength"; + } +}; + +//! Classe per calcular el ratio entre l'area de la elipse i la de la taca +//! Class +class CBlobGetAreaElipseRatio: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + if( blob.Area()==0.0 ) return 0.0; + + CvBox2D elipse = blob.GetEllipse(); + double ratioAreaElipseAreaTaca = ( (elipse.size.width/2.0) + * + (elipse.size.height/2.0) + *CV_PI + ) + / + blob.Area(); + + return ratioAreaElipseAreaTaca; + } + const char *GetNom() + { + return "CBlobGetAreaElipseRatio"; + } +}; + +//! Classe per calcular la longitud de l'eix menor d'un blob +//! Class to calculate the length of the minor axis of the ellipse that fits the blob edges +class CBlobGetMinorAxisLength: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + CvBox2D elipse = blob.GetEllipse(); + + return elipse.size.height; + } + const char *GetNom() + { + return "CBlobGetMinorAxisLength"; + } +}; + +//! Classe per calcular l'orientació de l'ellipse del blob en radians +//! Class to calculate the orientation of the ellipse that fits the blob edges in radians +class CBlobGetOrientation: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + CvBox2D elipse = blob.GetEllipse(); +/* + if( elipse.angle > 180.0 ) + return (( elipse.angle - 180.0 )* DEGREE2RAD); + else + return ( elipse.angle * DEGREE2RAD); +*/ + return elipse.angle; + } + const char *GetNom() + { + return "CBlobGetOrientation"; + } +}; + +//! Classe per calcular el cosinus de l'orientació de l'ellipse del blob +//! 
Class to calculate the cosinus of the orientation of the ellipse that fits the blob edges +class CBlobGetOrientationCos: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + CBlobGetOrientation getOrientation; + return fabs( cos( getOrientation(blob)*DEGREE2RAD )); + } + const char *GetNom() + { + return "CBlobGetOrientationCos"; + } +}; + + +//! Classe per calcular el ratio entre l'eix major i menor de la el·lipse +//! Class to calculate the ratio between both axes of the ellipse +class CBlobGetAxisRatio: public COperadorBlob +{ +public: + double operator()(CBlob &blob) + { + double major,minor; + CBlobGetMajorAxisLength getMajor; + CBlobGetMinorAxisLength getMinor; + + major = getMajor(blob); + minor = getMinor(blob); + + if( major != 0 ) + return minor / major; + else + return 0; + } + const char *GetNom() + { + return "CBlobGetAxisRatio"; + } +}; + + +//! Classe per calcular si un punt cau dins del blob +//! Class to calculate whether a point is inside a blob +class CBlobGetXYInside: public COperadorBlob +{ +public: + //! Constructor estàndard + //! Standard constructor + CBlobGetXYInside() + { + m_p.x = 0; + m_p.y = 0; + } + //! Constructor: indiquem el punt + //! Constructor: sets the point + CBlobGetXYInside( CvPoint2D32f p ) + { + m_p = p; + }; + double operator()(CBlob &blob); + const char *GetNom() + { + return "CBlobGetXYInside"; + } + +private: + //! punt que considerem + //! point to be considered + CvPoint2D32f m_p; +}; + +#endif //!BLOB_OPERATORS_H_INCLUDED diff --git a/CVBlob.framework/Versions/A/Headers/BlobProperties.h b/CVBlob.framework/Versions/A/Headers/BlobProperties.h new file mode 100644 index 0000000000..e4afc6d02c --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/BlobProperties.h @@ -0,0 +1,70 @@ + +//! 
Disable warnings referred to 255 character truncation for the std:map +#pragma warning( disable : 4786 ) + +#ifndef BLOB_PROPERTIES_H_INCLUDED +#define BLOB_PROPERTIES_H_INCLUDED + +#include +#include "BlobLibraryConfiguration.h" +#include "BlobContour.h" + + +#ifdef BLOB_OBJECT_FACTORY + //! Object factory pattern implementation + #include "..\inspecta\DesignPatterns\ObjectFactory.h" +#endif + + +//! Type of labelled images +typedef unsigned int t_labelType; + +//! Max order of calculated moments +#define MAX_MOMENTS_ORDER 3 + + +//! Blob class +class CBlobProperties +{ + typedef std::list t_contourList; + +public: + + CBlobProperties(); + virtual ~CBlobProperties(); + + //! Get blob area + double GetArea(); + + //! Get blob perimeter + double GetPerimeter(); + + //! Get contour moment (p,q up to MAX_CALCULATED_MOMENTS) + double GetMoment(int p, int q); + + + ////////////////////////////////////////////////////////////////////////// + // Blob contours + ////////////////////////////////////////////////////////////////////////// + + + //! Contour storage memory + CvMemStorage *m_storage; + //! External contour of the blob (crack codes) + CBlobContour m_externalContour; + //! Internal contours (crack codes) + t_contourList m_internalContours; + +private: + + //! Computed area from blob + double m_area; + //! Computed perimeter from blob + double m_perimeter; + // Computed moment from the blob + double m_moment[MAX_MOMENTS_ORDER*MAX_MOMENTS_ORDER]; + +}; + +#endif //!BLOB_PROPERTIES_H_INCLUDED + diff --git a/CVBlob.framework/Versions/A/Headers/BlobResult.h b/CVBlob.framework/Versions/A/Headers/BlobResult.h new file mode 100644 index 0000000000..e3c48b062b --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/BlobResult.h @@ -0,0 +1,171 @@ +/************************************************************************ + BlobResult.h + +FUNCIONALITAT: Definició de la classe CBlobResult +AUTOR: Inspecta S.L. 
+MODIFICACIONS (Modificació, Autor, Data): + +FUNCTIONALITY: Definition of the CBlobResult class +AUTHOR: Inspecta S.L. +MODIFICATIONS (Modification, Author, Date): + +**************************************************************************/ + + +#if !defined(_CLASSE_BLOBRESULT_INCLUDED) +#define _CLASSE_BLOBRESULT_INCLUDED + +#if _MSC_VER > 1000 +#pragma once +#endif // _MSC_VER > 1000 + +#include "BlobLibraryConfiguration.h" +#include +#include + +#ifdef MATRIXCV_ACTIU + #include "matrixCV.h" +#else + // llibreria STL + #include "vector" + //! Vector de doubles + typedef std::vector double_stl_vector; +#endif + +#include // vectors de la STL +#include +#include "blob.h" +#include "BlobOperators.h" +#include "ComponentLabeling.h" +/************************************************************************** + Filtres / Filters +**************************************************************************/ + +//! accions que es poden fer amb els filtres +//! Actions performed by a filter (include or exclude blobs) +#define B_INCLUDE 1L +#define B_EXCLUDE 2L + +//! condicions sobre els filtres +//! Conditions to apply the filters +#define B_EQUAL 3L +#define B_NOT_EQUAL 4L +#define B_GREATER 5L +#define B_LESS 6L +#define B_GREATER_OR_EQUAL 7L +#define B_LESS_OR_EQUAL 8L +#define B_INSIDE 9L +#define B_OUTSIDE 10L + + +/************************************************************************** + Excepcions / Exceptions +**************************************************************************/ + +//! Excepcions llençades per les funcions: +#define EXCEPTION_BLOB_OUT_OF_BOUNDS 1000 +#define EXCEPCIO_CALCUL_BLOBS 1001 + +/** + Classe que conté un conjunt de blobs i permet extreure'n propietats + o filtrar-los segons determinats criteris. + Class to calculate the blobs of an image and calculate some properties + on them. Also, the class provides functions to filter the blobs using + some criteria. +*/ +class CBlobResult +{ +public: + + //! 
constructor estandard, crea un conjunt buit de blobs + //! Standard constructor, it creates an empty set of blobs + CBlobResult(); + //! constructor a partir d'una imatge + //! Image constructor, it creates an object with the blobs of the image + CBlobResult(IplImage *source, IplImage *mask, uchar backgroundColor); + //! constructor de còpia + //! Copy constructor + CBlobResult( const CBlobResult &source ); + //! Destructor + virtual ~CBlobResult(); + + //! operador = per a fer assignacions entre CBlobResult + //! Assigment operator + CBlobResult& operator=(const CBlobResult& source); + //! operador + per concatenar dos CBlobResult + //! Addition operator to concatenate two sets of blobs + CBlobResult operator+( const CBlobResult& source ) const; + + //! Afegeix un blob al conjunt + //! Adds a blob to the set of blobs + void AddBlob( CBlob *blob ); + +#ifdef MATRIXCV_ACTIU + //! Calcula un valor sobre tots els blobs de la classe retornant una MatrixCV + //! Computes some property on all the blobs of the class + double_vector GetResult( funcio_calculBlob *evaluador ) const; +#endif + //! Calcula un valor sobre tots els blobs de la classe retornant un std::vector + //! Computes some property on all the blobs of the class + double_stl_vector GetSTLResult( funcio_calculBlob *evaluador ) const; + + //! Calcula un valor sobre un blob de la classe + //! Computes some property on one blob of the class + double GetNumber( int indexblob, funcio_calculBlob *evaluador ) const; + + //! Retorna aquells blobs que compleixen les condicions del filtre en el destination + //! Filters the blobs of the class using some property + void Filter(CBlobResult &dst, + int filterAction, funcio_calculBlob *evaluador, + int condition, double lowLimit, double highLimit = 0 ); + void Filter(CBlobResult &dst, + int filterAction, funcio_calculBlob *evaluador, + int condition, double lowLimit, double highLimit = 0 ) const; + + //! Retorna l'enèssim blob segons un determinat criteri + //! 
Sorts the blobs of the class acording to some criteria and returns the n-th blob + void GetNthBlob( funcio_calculBlob *criteri, int nBlob, CBlob &dst ) const; + + //! Retorna el blob enèssim + //! Gets the n-th blob of the class ( without sorting ) + CBlob GetBlob(int indexblob) const; + CBlob *GetBlob(int indexblob); + + //! Elimina tots els blobs de l'objecte + //! Clears all the blobs of the class + void ClearBlobs(); + + //! Escriu els blobs a un fitxer + //! Prints some features of all the blobs in a file + void PrintBlobs( char *nom_fitxer ) const; + + +//Metodes GET/SET + + //! Retorna el total de blobs + //! Gets the total number of blobs + int GetNumBlobs() const + { + return(m_blobs.size()); + } + + +private: + + //! Funció per gestionar els errors + //! Function to manage the errors + void RaiseError(const int errorCode) const; + + //! Does the Filter method job + void DoFilter(CBlobResult &dst, + int filterAction, funcio_calculBlob *evaluador, + int condition, double lowLimit, double highLimit = 0) const; + +protected: + + //! Vector amb els blobs + //! Vector with all the blobs + Blob_vector m_blobs; +}; + +#endif // !defined(_CLASSE_BLOBRESULT_INCLUDED) diff --git a/CVBlob.framework/Versions/A/Headers/ComponentLabeling.h b/CVBlob.framework/Versions/A/Headers/ComponentLabeling.h new file mode 100644 index 0000000000..592c21e923 --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/ComponentLabeling.h @@ -0,0 +1,30 @@ +#if !defined(_COMPONENT_LABELING_H_INCLUDED) +#define _CLASSE_BLOBRESULT_INCLUDED + +#include "vector" +#include "BlobContour.h" +#include "blob.h" + + +//! 
definició de que es un vector de blobs +typedef std::vector Blob_vector; + + + +bool ComponentLabeling( IplImage* inputImage, + IplImage* maskImage, + unsigned char backgroundColor, + Blob_vector &blobs ); + + +void contourTracing( IplImage *image, IplImage *mask, CvPoint contourStart, t_labelType *labels, + bool *visitedPoints, t_labelType label, + bool internalContour, unsigned char backgroundColor, + CBlobContour *currentBlobContour ); + +CvPoint tracer( IplImage *image, IplImage *mask, CvPoint P, bool *visitedPoints, + short initialMovement, + unsigned char backgroundColor, short &movement ); + + +#endif //!_CLASSE_BLOBRESULT_INCLUDED diff --git a/CVBlob.framework/Versions/A/Headers/blob.h b/CVBlob.framework/Versions/A/Headers/blob.h new file mode 100644 index 0000000000..89729d4050 --- /dev/null +++ b/CVBlob.framework/Versions/A/Headers/blob.h @@ -0,0 +1,172 @@ +/************************************************************************ + Blob.h + +FUNCIONALITAT: Definició de la classe CBlob +AUTOR: Inspecta S.L. +MODIFICACIONS (Modificació, Autor, Data): + +FUNCTIONALITY: Definition of the CBlob class and some helper classes to perform + some calculations on it +AUTHOR: Inspecta S.L. +MODIFICATIONS (Modification, Author, Date): + +**************************************************************************/ + +//! Disable warnings referred to 255 character truncation for the std:map +#pragma warning( disable : 4786 ) + +#ifndef CBLOB_INSPECTA_INCLUDED +#define CBLOB_INSPECTA_INCLUDED + +#include +#include "BlobLibraryConfiguration.h" +#include "BlobContour.h" + + +#ifdef BLOB_OBJECT_FACTORY + //! Object factory pattern implementation + #include "..\inspecta\DesignPatterns\ObjectFactory.h" +#endif + + +//! Type of labelled images +typedef unsigned int t_labelType; + + +//! Blob class +class CBlob +{ + typedef std::list t_contourList; + +public: + CBlob(); + CBlob( t_labelType id, CvPoint startPoint, CvSize originalImageSize ); + ~CBlob(); + + //! 
Copy constructor + CBlob( const CBlob &src ); + CBlob( const CBlob *src ); + + //! Operador d'assignació + //! Assigment operator + CBlob& operator=(const CBlob &src ); + + //! Adds a new internal contour to the blob + void AddInternalContour( const CBlobContour &newContour ); + + //! Retrieves contour in Freeman's chain code + CBlobContour *GetExternalContour() + { + return &m_externalContour; + } + + //! Retrieves blob storage + CvMemStorage *GetStorage() + { + return m_storage; + } + + //! Get label ID + t_labelType GetID() + { + return m_id; + } + //! > 0 for extern blobs, 0 if not + int Exterior( IplImage *mask, bool xBorder = true, bool yBorder = true ); + //! Compute blob's area + double Area(); + //! Compute blob's perimeter + double Perimeter(); + //! Compute blob's moment (p,q up to MAX_CALCULATED_MOMENTS) + double Moment(int p, int q); + + //! Compute extern perimeter + double ExternPerimeter( IplImage *mask, bool xBorder = true, bool yBorder = true ); + + //! Get mean grey color + double Mean( IplImage *image ); + + //! Get standard deviation grey color + double StdDev( IplImage *image ); + + //! Indica si el blob està buit ( no té cap info associada ) + //! Shows if the blob has associated information + bool IsEmpty(); + + //! Retorna el poligon convex del blob + //! Calculates the convex hull of the blob + t_PointList GetConvexHull(); + + //! Pinta l'interior d'un blob d'un color determinat + //! Paints the blob in an image + void FillBlob( IplImage *imatge, CvScalar color, int offsetX = 0, int offsetY = 0 ); + + //! Join a blob to current one (add's contour + void JoinBlob( CBlob *blob ); + + //! Get bounding box + CvRect GetBoundingBox(); + //! Get bounding ellipse + CvBox2D GetEllipse(); + + //! Minimun X + double MinX() + { + return GetBoundingBox().x; + } + //! Minimun Y + double MinY() + { + return GetBoundingBox().y; + } + //! Maximun X + double MaxX() + { + return GetBoundingBox().x + GetBoundingBox().width; + } + //! 
Maximun Y + double MaxY() + { + return GetBoundingBox().y + GetBoundingBox().height; + } +private: + + //! Deallocates all contours + void ClearContours(); + ////////////////////////////////////////////////////////////////////////// + // Blob contours + ////////////////////////////////////////////////////////////////////////// + + + //! Contour storage memory + CvMemStorage *m_storage; + //! External contour of the blob (crack codes) + CBlobContour m_externalContour; + //! Internal contours (crack codes) + t_contourList m_internalContours; + + ////////////////////////////////////////////////////////////////////////// + // Blob features + ////////////////////////////////////////////////////////////////////////// + + //! Label number + t_labelType m_id; + //! Area + double m_area; + //! Perimeter + double m_perimeter; + //! Extern perimeter from blob + double m_externPerimeter; + //! Mean gray color + double m_meanGray; + //! Standard deviation from gray color blob distribution + double m_stdDevGray; + //! Bounding box + CvRect m_boundingBox; + //! Bounding ellipse + CvBox2D m_ellipse; + //! Sizes from image where blob is extracted + CvSize m_originalImageSize; +}; + +#endif //CBLOB_INSPECTA_INCLUDED diff --git a/CVBlob.framework/Versions/Current b/CVBlob.framework/Versions/Current new file mode 120000 index 0000000000..a0f975209c --- /dev/null +++ b/CVBlob.framework/Versions/Current @@ -0,0 +1 @@ +A/ \ No newline at end of file diff --git a/OpenCV/Headers/calib3d/calib3d.hpp b/OpenCV/Headers/calib3d/calib3d.hpp new file mode 100644 index 0000000000..0d1cc46915 --- /dev/null +++ b/OpenCV/Headers/calib3d/calib3d.hpp @@ -0,0 +1,751 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CALIB3D_HPP__ +#define __OPENCV_CALIB3D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Camera Calibration, Pose Estimation and Stereo * +\****************************************************************************************/ + +typedef struct CvPOSITObject CvPOSITObject; + +/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */ +CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count ); + + +/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of + an object given its model and projection in a weak-perspective case */ +CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points, + double focal_length, CvTermCriteria criteria, + float* rotation_matrix, float* translation_vector); + +/* Releases CvPOSITObject structure */ +CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object ); + +/* updates the number of RANSAC iterations */ +CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob, + int model_points, int max_iters ); + +CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst ); + +/* Calculates fundamental matrix given a set of corresponding points */ +#define CV_FM_7POINT 1 +#define CV_FM_8POINT 2 + +#define CV_LMEDS 4 +#define CV_RANSAC 8 + +#define CV_FM_LMEDS_ONLY CV_LMEDS +#define CV_FM_RANSAC_ONLY CV_RANSAC +#define CV_FM_LMEDS CV_LMEDS +#define CV_FM_RANSAC CV_RANSAC + +enum +{ + CV_ITERATIVE = 0, + CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" + CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. 
Chang; "Complete Solution Classification for the Perspective-Three-Point Problem" +}; + +CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, + CvMat* fundamental_matrix, + int method CV_DEFAULT(CV_FM_RANSAC), + double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99), + CvMat* status CV_DEFAULT(NULL) ); + +/* For each input point on one of images + computes parameters of the corresponding + epipolar line on the other image */ +CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points, + int which_image, + const CvMat* fundamental_matrix, + CvMat* correspondent_lines ); + +/* Triangulation functions */ + +CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, + CvMat* projPoints1, CvMat* projPoints2, + CvMat* points4D); + +CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2, + CvMat* new_points1, CvMat* new_points2); + + +/* Computes the optimal new camera matrix according to the free scaling parameter alpha: + alpha=0 - only valid pixels will be retained in the undistorted image + alpha=1 - all the source image pixels will be retained in the undistorted image +*/ +CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, + const CvMat* dist_coeffs, + CvSize image_size, double alpha, + CvMat* new_camera_matrix, + CvSize new_imag_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pixel_ROI CV_DEFAULT(0), + int center_principal_point CV_DEFAULT(0)); + +/* Converts rotation vector to rotation matrix or vice versa */ +CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst, + CvMat* jacobian CV_DEFAULT(0) ); + +/* Finds perspective transformation between the object plane and image (view) plane */ +CVAPI(int) cvFindHomography( const CvMat* src_points, + const CvMat* dst_points, + CvMat* homography, + int method CV_DEFAULT(0), + double ransacReprojThreshold CV_DEFAULT(3), + CvMat* mask CV_DEFAULT(0)); + +/* Computes RQ decomposition for 3x3 matrices */ +CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, 
CvMat *matrixR, CvMat *matrixQ, + CvMat *matrixQx CV_DEFAULT(NULL), + CvMat *matrixQy CV_DEFAULT(NULL), + CvMat *matrixQz CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes projection matrix decomposition */ +CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, + CvMat *rotMatr, CvMat *posVect, + CvMat *rotMatrX CV_DEFAULT(NULL), + CvMat *rotMatrY CV_DEFAULT(NULL), + CvMat *rotMatrZ CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes d(AB)/dA and d(AB)/dB */ +CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ); + +/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)), + t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */ +CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, + const CvMat* _rvec2, const CvMat* _tvec2, + CvMat* _rvec3, CvMat* _tvec3, + CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0), + CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0), + CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0), + CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) ); + +/* Projects object points to the view plane using + the specified extrinsic and intrinsic camera parameters */ +CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, + const CvMat* translation_vector, const CvMat* camera_matrix, + const CvMat* distortion_coeffs, CvMat* image_points, + CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL), + CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL), + CvMat* dpddist CV_DEFAULT(NULL), + double aspect_ratio CV_DEFAULT(0)); + +/* Finds extrinsic camera parameters from + a few known corresponding point pairs and intrinsic parameters */ +CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + CvMat* rotation_vector, + CvMat* 
translation_vector, + int use_extrinsic_guess CV_DEFAULT(0) ); + +/* Computes initial estimate of the intrinsic camera parameters + in case of planar calibration target (e.g. chessboard) */ +CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points, + const CvMat* image_points, + const CvMat* npoints, CvSize image_size, + CvMat* camera_matrix, + double aspect_ratio CV_DEFAULT(1.) ); + +#define CV_CALIB_CB_ADAPTIVE_THRESH 1 +#define CV_CALIB_CB_NORMALIZE_IMAGE 2 +#define CV_CALIB_CB_FILTER_QUADS 4 +#define CV_CALIB_CB_FAST_CHECK 8 + +// Performs a fast check if a chessboard is in the input image. This is a workaround to +// a problem of cvFindChessboardCorners being slow on images with no chessboard +// - src: input image +// - size: chessboard size +// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, +// 0 if there is no chessboard, -1 in case of error +CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size); + + /* Detects corners on a chessboard calibration pattern */ +CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size, + CvPoint2D32f* corners, + int* corner_count CV_DEFAULT(NULL), + int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) ); + +/* Draws individual chessboard corners or the whole chessboard detected */ +CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, + CvPoint2D32f* corners, + int count, int pattern_was_found ); + +#define CV_CALIB_USE_INTRINSIC_GUESS 1 +#define CV_CALIB_FIX_ASPECT_RATIO 2 +#define CV_CALIB_FIX_PRINCIPAL_POINT 4 +#define CV_CALIB_ZERO_TANGENT_DIST 8 +#define CV_CALIB_FIX_FOCAL_LENGTH 16 +#define CV_CALIB_FIX_K1 32 +#define CV_CALIB_FIX_K2 64 +#define CV_CALIB_FIX_K3 128 +#define CV_CALIB_FIX_K4 2048 +#define CV_CALIB_FIX_K5 4096 +#define CV_CALIB_FIX_K6 8192 +#define CV_CALIB_RATIONAL_MODEL 16384 + +/* Finds intrinsic and extrinsic camera parameters + from a few views of known calibration pattern */ +CVAPI(double) 
cvCalibrateCamera2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* point_counts, + CvSize image_size, + CvMat* camera_matrix, + CvMat* distortion_coeffs, + CvMat* rotation_vectors CV_DEFAULT(NULL), + CvMat* translation_vectors CV_DEFAULT(NULL), + int flags CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) ); + +/* Computes various useful characteristics of the camera from the data computed by + cvCalibrateCamera2 */ +CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix, + CvSize image_size, + double aperture_width CV_DEFAULT(0), + double aperture_height CV_DEFAULT(0), + double *fovx CV_DEFAULT(NULL), + double *fovy CV_DEFAULT(NULL), + double *focal_length CV_DEFAULT(NULL), + CvPoint2D64f *principal_point CV_DEFAULT(NULL), + double *pixel_aspect_ratio CV_DEFAULT(NULL)); + +#define CV_CALIB_FIX_INTRINSIC 256 +#define CV_CALIB_SAME_FOCAL_LENGTH 512 + +/* Computes the transformation from one camera coordinate system to another one + from a few correspondent views of the same calibration target. 
Optionally, calibrates + both cameras */ +CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, + const CvMat* image_points2, const CvMat* npoints, + CvMat* camera_matrix1, CvMat* dist_coeffs1, + CvMat* camera_matrix2, CvMat* dist_coeffs2, + CvSize image_size, CvMat* R, CvMat* T, + CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)), + int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC)); + +#define CV_CALIB_ZERO_DISPARITY 1024 + +/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both + views parallel (=> to make all the epipolar lines horizontal or vertical) */ +CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, + const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, + CvSize image_size, const CvMat* R, const CvMat* T, + CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, + CvMat* Q CV_DEFAULT(0), + int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY), + double alpha CV_DEFAULT(-1), + CvSize new_image_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pix_ROI1 CV_DEFAULT(0), + CvRect* valid_pix_ROI2 CV_DEFAULT(0)); + +/* Computes rectification transformations for uncalibrated pair of images using a set + of point correspondences */ +CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, + const CvMat* F, CvSize img_size, + CvMat* H1, CvMat* H2, + double threshold CV_DEFAULT(5)); + + + +/* stereo correspondence parameters and functions */ + +#define CV_STEREO_BM_NORMALIZED_RESPONSE 0 +#define CV_STEREO_BM_XSOBEL 1 + +/* Block matching algorithm structure */ +typedef struct CvStereoBMState +{ + // pre-filtering (normalization of input images) + int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now + int preFilterSize; // averaging window size: ~5x5..21x21 + int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap] + + // 
correspondence using Sum of Absolute Difference (SAD) + int SADWindowSize; // ~5x5..21x21 + int minDisparity; // minimum disparity (can be negative) + int numberOfDisparities; // maximum disparity - minimum disparity (> 0) + + // post-filtering + int textureThreshold; // the disparity is only computed for pixels + // with textured enough neighborhood + int uniquenessRatio; // accept the computed disparity d* only if + // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.) + // for any d != d*+/-1 within the search range. + int speckleWindowSize; // disparity variation window + int speckleRange; // acceptable range of variation in window + + int trySmallerWindows; // if 1, the results may be more accurate, + // at the expense of slower processing + CvRect roi1, roi2; + int disp12MaxDiff; + + // temporary buffers + CvMat* preFilteredImg0; + CvMat* preFilteredImg1; + CvMat* slidingSumBuf; + CvMat* cost; + CvMat* disp; +} CvStereoBMState; + +#define CV_STEREO_BM_BASIC 0 +#define CV_STEREO_BM_FISH_EYE 1 +#define CV_STEREO_BM_NARROW 2 + +CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC), + int numberOfDisparities CV_DEFAULT(0)); + +CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state ); + +CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right, + CvArr* disparity, CvStereoBMState* state ); + +CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity, + int numberOfDisparities, int SADWindowSize ); + +CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost, + int minDisparity, int numberOfDisparities, + int disp12MaxDiff CV_DEFAULT(1) ); + +/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */ +CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage, + CvArr* _3dImage, const CvMat* Q, + int handleMissingValues CV_DEFAULT(0) ); + + +#ifdef __cplusplus +} + 
+////////////////////////////////////////////////////////////////////////////////////////// +class CV_EXPORTS CvLevMarq +{ +public: + CvLevMarq(); + CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + ~CvLevMarq(); + void init( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + bool update( const CvMat*& param, CvMat*& J, CvMat*& err ); + bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm ); + + void clear(); + void step(); + enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 }; + + cv::Ptr mask; + cv::Ptr prevParam; + cv::Ptr param; + cv::Ptr J; + cv::Ptr err; + cv::Ptr JtJ; + cv::Ptr JtJN; + cv::Ptr JtErr; + cv::Ptr JtJV; + cv::Ptr JtJW; + double prevErrNorm, errNorm; + int lambdaLg10; + CvTermCriteria criteria; + int state; + int iters; + bool completeSymmFlag; +}; + +namespace cv +{ +//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation +CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray()); + +//! type of the robust estimation algorithm +enum +{ + LMEDS=CV_LMEDS, //!< least-median algorithm + RANSAC=CV_RANSAC //!< RANSAC algorithm +}; + +//! computes the best-fit perspective transformation mapping srcPoints to dstPoints. +CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints, + int method=0, double ransacReprojThreshold=3, + OutputArray mask=noArray()); + +//! variant of findHomography for backward compatibility +CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints, + OutputArray mask, int method=0, double ransacReprojThreshold=3); + +//! 
Computes RQ decomposition of 3x3 matrix +CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, + OutputArray Qx=noArray(), + OutputArray Qy=noArray(), + OutputArray Qz=noArray()); + +//! Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector +CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, + OutputArray rotMatrix, OutputArray transVect, + OutputArray rotMatrixX=noArray(), + OutputArray rotMatrixY=noArray(), + OutputArray rotMatrixZ=noArray(), + OutputArray eulerAngles=noArray() ); + +//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients +CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, + OutputArray dABdA, + OutputArray dABdB ); + +//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments +CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1, + InputArray rvec2, InputArray tvec2, + OutputArray rvec3, OutputArray tvec3, + OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(), + OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(), + OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(), + OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() ); + +//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters +CV_EXPORTS_W void projectPoints( InputArray objectPoints, + InputArray rvec, InputArray tvec, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray imagePoints, + OutputArray jacobian=noArray(), + double aspectRatio=0 ); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled. 
+enum +{ + ITERATIVE=CV_ITERATIVE, + EPNP=CV_EPNP, + P3P=CV_P3P +}; +CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvec, OutputArray tvec, + bool useExtrinsicGuess=false, int flags=ITERATIVE); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible. +CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + OutputArray rvec, + OutputArray tvec, + bool useExtrinsicGuess = false, + int iterationsCount = 100, + float reprojectionError = 8.0, + int minInliersCount = 100, + OutputArray inliers = noArray(), + int flags = ITERATIVE); + +//! initializes camera matrix from a few 3D points and the corresponding projections. +CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, double aspectRatio=1. ); + +enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 }; + +//! finds checkerboard pattern of the specified size in the image +CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, + OutputArray corners, + int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE ); + +//! finds subpixel-accurate positions of the chessboard corners +CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size); + +//! draws the checkerboard pattern (found or partly found) in the image +CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize, + InputArray corners, bool patternWasFound ); + +enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4 }; + +//! 
finds circles' grid pattern of the specified size in the image +CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, + const Ptr &blobDetector = new SimpleBlobDetector()); + +//! the deprecated function. Use findCirclesGrid() instead of it. +CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID ); +enum +{ + CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS, + CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO, + CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT, + CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST, + CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH, + CALIB_FIX_K1 = CV_CALIB_FIX_K1, + CALIB_FIX_K2 = CV_CALIB_FIX_K2, + CALIB_FIX_K3 = CV_CALIB_FIX_K3, + CALIB_FIX_K4 = CV_CALIB_FIX_K4, + CALIB_FIX_K5 = CV_CALIB_FIX_K5, + CALIB_FIX_K6 = CV_CALIB_FIX_K6, + CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL, + // only for stereo + CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC, + CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH, + // for stereo rectification + CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY +}; + +//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern. +CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, + CV_OUT InputOutputArray cameraMatrix, + CV_OUT InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + int flags=0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) ); + +//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size. 
+CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, + Size imageSize, + double apertureWidth, + double apertureHeight, + CV_OUT double& fovx, + CV_OUT double& fovy, + CV_OUT double& focalLength, + CV_OUT Point2d& principalPoint, + CV_OUT double& aspectRatio ); + +//! finds intrinsic and extrinsic parameters of a stereo camera +CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints1, + InputArrayOfArrays imagePoints2, + CV_OUT InputOutputArray cameraMatrix1, + CV_OUT InputOutputArray distCoeffs1, + CV_OUT InputOutputArray cameraMatrix2, + CV_OUT InputOutputArray distCoeffs2, + Size imageSize, OutputArray R, + OutputArray T, OutputArray E, OutputArray F, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), + int flags=CALIB_FIX_INTRINSIC ); + + +//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters +CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + Size imageSize, InputArray R, InputArray T, + OutputArray R1, OutputArray R2, + OutputArray P1, OutputArray P2, + OutputArray Q, int flags=CALIB_ZERO_DISPARITY, + double alpha=-1, Size newImageSize=Size(), + CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 ); + +//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed) +CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2, + InputArray F, Size imgSize, + OutputArray H1, OutputArray H2, + double threshold=5 ); + +//! computes the rectification transformations for 3-head camera, where all the heads are on the same line. 
+CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + InputArray cameraMatrix3, InputArray distCoeffs3, + InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3, + Size imageSize, InputArray R12, InputArray T12, + InputArray R13, InputArray T13, + OutputArray R1, OutputArray R2, OutputArray R3, + OutputArray P1, OutputArray P2, OutputArray P3, + OutputArray Q, double alpha, Size newImgSize, + CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags ); + +//! returns the optimal new camera matrix +CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, double alpha, Size newImgSize=Size(), + CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false); + +//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1)) +CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst ); + +//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z)) +CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst ); + +//! for backward compatibility +CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst ); + +//! the algorithm for finding fundamental matrix +enum +{ + FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm + FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm + FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm + FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm +}; + +//! finds fundamental matrix from a set of corresponding 2D points +CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, + int method=FM_RANSAC, + double param1=3., double param2=0.99, + OutputArray mask=noArray()); + +//! 
variant of findFundamentalMat for backward compatibility +CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2, + OutputArray mask, int method=FM_RANSAC, + double param1=3., double param2=0.99); + +//! finds coordinates of epipolar lines corresponding the specified points +CV_EXPORTS void computeCorrespondEpilines( InputArray points, + int whichImage, InputArray F, + OutputArray lines ); + +CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2, + InputArray projPoints1, InputArray projPoints2, + OutputArray points4D ); + +CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2, + OutputArray newPoints1, OutputArray newPoints2 ); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +/*! + Block Matching Stereo Correspondence Algorithm + + The class implements BM stereo correspondence algorithm by K. Konolige. +*/ +class CV_EXPORTS_W StereoBM +{ +public: + enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1, + BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 }; + + //! the default constructor + CV_WRAP StereoBM(); + //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size + CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21); + //! the method that reinitializes the state. The previous content is destroyed + void init(int preset, int ndisparities=0, int SADWindowSize=21); + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair + CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right, + OutputArray disparity, int disptype=CV_16S ); + + //! pointer to the underlying CvStereoBMState + Ptr state; +}; + + +/*! + Semi-Global Block Matching Stereo Correspondence Algorithm + + The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some its modification. 
+ */ +class CV_EXPORTS_W StereoSGBM +{ +public: + enum { DISP_SHIFT=4, DISP_SCALE = (1<(X,Y,Z) using the matrix Q returned by cv::stereoRectify +CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity, + OutputArray _3dImage, InputArray Q, + bool handleMissingValues=false, + int ddepth=-1 ); + +CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst, + OutputArray out, OutputArray inliers, + double ransacThreshold=3, double confidence=0.99); + +} + +#endif + +#endif diff --git a/OpenCV/Headers/contrib/contrib.hpp b/OpenCV/Headers/contrib/contrib.hpp new file mode 100644 index 0000000000..1b13fd7f34 --- /dev/null +++ b/OpenCV/Headers/contrib/contrib.hpp @@ -0,0 +1,986 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CONTRIB_HPP__ +#define __OPENCV_CONTRIB_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/objdetect/objdetect.hpp" + +#ifdef __cplusplus + +/****************************************************************************************\ +* Adaptive Skin Detector * +\****************************************************************************************/ + +class CV_EXPORTS CvAdaptiveSkinDetector +{ +private: + enum { + GSD_HUE_LT = 3, + GSD_HUE_UT = 33, + GSD_INTENSITY_LT = 15, + GSD_INTENSITY_UT = 250 + }; + + class CV_EXPORTS Histogram + { + private: + enum { + HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1) + }; + + protected: + int findCoverageIndex(double surfaceToCover, int defaultValue = 0); + + public: + CvHistogram *fHistogram; + Histogram(); + virtual ~Histogram(); + + void findCurveThresholds(int &x1, int &x2, double percent = 0.05); + void mergeWith(Histogram *source, double weight); + 
}; + + int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider; + double fHistogramMergeFactor, fHuePercentCovered; + Histogram histogramHueMotion, skinHueHistogram; + IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame; + IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame; + +protected: + void initData(IplImage *src, int widthDivider, int heightDivider); + void adaptiveFilter(); + +public: + + enum { + MORPHING_METHOD_NONE = 0, + MORPHING_METHOD_ERODE = 1, + MORPHING_METHOD_ERODE_ERODE = 2, + MORPHING_METHOD_ERODE_DILATE = 3 + }; + + CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE); + virtual ~CvAdaptiveSkinDetector(); + + virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask); +}; + + +/****************************************************************************************\ + * Fuzzy MeanShift Tracker * + \****************************************************************************************/ + +class CV_EXPORTS CvFuzzyPoint { +public: + double x, y, value; + + CvFuzzyPoint(double _x, double _y); +}; + +class CV_EXPORTS CvFuzzyCurve { +private: + std::vector points; + double value, centre; + + bool between(double x, double x1, double x2); + +public: + CvFuzzyCurve(); + ~CvFuzzyCurve(); + + void setCentre(double _centre); + double getCentre(); + void clear(); + void addPoint(double x, double y); + double calcValue(double param); + double getValue(); + void setValue(double _value); +}; + +class CV_EXPORTS CvFuzzyFunction { +public: + std::vector curves; + + CvFuzzyFunction(); + ~CvFuzzyFunction(); + void addCurve(CvFuzzyCurve *curve, double value = 0); + void resetValues(); + double calcValue(); + CvFuzzyCurve *newCurve(); +}; + +class CV_EXPORTS CvFuzzyRule { +private: + CvFuzzyCurve *fuzzyInput1, *fuzzyInput2; + CvFuzzyCurve *fuzzyOutput; +public: + CvFuzzyRule(); + ~CvFuzzyRule(); + void 
setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcValue(double param1, double param2); + CvFuzzyCurve *getOutputCurve(); +}; + +class CV_EXPORTS CvFuzzyController { +private: + std::vector rules; +public: + CvFuzzyController(); + ~CvFuzzyController(); + void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcOutput(double param1, double param2); +}; + +class CV_EXPORTS CvFuzzyMeanShiftTracker +{ +private: + class FuzzyResizer + { + private: + CvFuzzyFunction iInput, iOutput; + CvFuzzyController fuzzyController; + public: + FuzzyResizer(); + int calcOutput(double edgeDensity, double density); + }; + + class SearchWindow + { + public: + FuzzyResizer *fuzzyResizer; + int x, y; + int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth; + int ldx, ldy, ldw, ldh, numShifts, numIters; + int xGc, yGc; + long m00, m01, m10, m11, m02, m20; + double ellipseAngle; + double density; + unsigned int depthLow, depthHigh; + int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom; + + SearchWindow(); + ~SearchWindow(); + void setSize(int _x, int _y, int _width, int _height); + void initDepthValues(IplImage *maskImage, IplImage *depthMap); + bool shift(); + void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth); + void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth); + }; + +public: + enum TrackingState + { + tsNone = 0, + tsSearching = 1, + tsTracking = 2, + tsSetWindow = 3, + tsDisabled = 10 + }; + + enum ResizeMethod { + rmEdgeDensityLinear = 0, + rmEdgeDensityFuzzy = 1, + rmInnerDensity = 2 + }; + + enum { + MinKernelMass = 1000 + }; + + 
SearchWindow kernel; + int searchMode; + +private: + enum + { + MaxMeanShiftIteration = 5, + MaxSetSizeIteration = 5 + }; + + void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth); + +public: + CvFuzzyMeanShiftTracker(); + ~CvFuzzyMeanShiftTracker(); + + void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass); +}; + + +namespace cv +{ + + class CV_EXPORTS Octree + { + public: + struct Node + { + Node() {} + int begin, end; + float x_min, x_max, y_min, y_max, z_min, z_max; + int maxLevels; + bool isLeaf; + int children[8]; + }; + + Octree(); + Octree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual ~Octree(); + + virtual void buildTree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual void getPointsWithinSphere( const Point3f& center, float radius, + vector& points ) const; + const vector& getNodes() const { return nodes; } + private: + int minPoints; + vector points; + vector nodes; + + virtual void buildNext(size_t node_ind); + }; + + + class CV_EXPORTS Mesh3D + { + public: + struct EmptyMeshException {}; + + Mesh3D(); + Mesh3D(const vector& vtx); + ~Mesh3D(); + + void buildOctree(); + void clearOctree(); + float estimateResolution(float tryRatio = 0.1f); + void computeNormals(float normalRadius, int minNeighbors = 20); + void computeNormals(const vector& subset, float normalRadius, int minNeighbors = 20); + + void writeAsVrml(const String& file, const vector& colors = vector()) const; + + vector vtx; + vector normals; + float resolution; + Octree octree; + + const static Point3f allzero; + }; + + class CV_EXPORTS SpinImageModel + { + public: + + /* model parameters, leave unset for default or auto estimate */ + float normalRadius; + int minNeighbors; + + float binSize; + int imageWidth; + + float lambda; + float gamma; + + float T_GeometriccConsistency; 
+ float T_GroupingCorespondances; + + /* public interface */ + SpinImageModel(); + explicit SpinImageModel(const Mesh3D& mesh); + ~SpinImageModel(); + + void setLogger(std::ostream* log); + void selectRandomSubset(float ratio); + void setSubset(const vector& subset); + void compute(); + + void match(const SpinImageModel& scene, vector< vector >& result); + + Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const; + + size_t getSpinCount() const { return spinImages.rows; } + Mat getSpinImage(size_t index) const { return spinImages.row((int)index); } + const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; } + const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; } + + const Mesh3D& getMesh() const { return mesh; } + Mesh3D& getMesh() { return mesh; } + + /* static utility functions */ + static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result); + + static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal); + + static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2); + + static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2, + float gamma); + protected: + void defaultParams(); + + void matchSpinToModel(const Mat& spin, vector& indeces, + vector& corrCoeffs, bool useExtremeOutliers = true) const; + + void repackSpinImages(const vector& mask, Mat& spinImages, bool reAlloc = true) const; + + vector subset; + Mesh3D mesh; + Mat spinImages; + std::ostream* out; + }; + + class 
CV_EXPORTS TickMeter + { + public: + TickMeter(); + void start(); + void stop(); + + int64 getTimeTicks() const; + double getTimeMicro() const; + double getTimeMilli() const; + double getTimeSec() const; + int64 getCounter() const; + + void reset(); + private: + int64 counter; + int64 sumTime; + int64 startTime; + }; + + CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm); + + class CV_EXPORTS SelfSimDescriptor + { + public: + SelfSimDescriptor(); + SelfSimDescriptor(int _ssize, int _lsize, + int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET, + int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS, + int _nangles=DEFAULT_NUM_ANGLES); + SelfSimDescriptor(const SelfSimDescriptor& ss); + virtual ~SelfSimDescriptor(); + SelfSimDescriptor& operator = (const SelfSimDescriptor& ss); + + size_t getDescriptorSize() const; + Size getGridSize( Size imgsize, Size winStride ) const; + + virtual void compute(const Mat& img, vector& descriptors, Size winStride=Size(), + const vector& locations=vector()) const; + virtual void computeLogPolarMapping(Mat& mappingMask) const; + virtual void SSD(const Mat& img, Point pt, Mat& ssd) const; + + int smallSize; + int largeSize; + int startDistanceBucket; + int numberOfDistanceBuckets; + int numberOfAngles; + + enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41, + DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3, + DEFAULT_NUM_DISTANCE_BUCKETS = 7 }; + }; + + + typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data); + + class LevMarqSparse { + public: + LevMarqSparse(); + LevMarqSparse(int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. 
rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data, // user-specific data passed to the callbacks + BundleAdjustCallback cb, void* user_data + ); + + virtual ~LevMarqSparse(); + + virtual void run( int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. 
non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data // user-specific data passed to the callbacks + ); + + virtual void clear(); + + // useful function to do simple bundle adjustment tasks + static void bundleAdjust(vector& points, // positions of points in global coordinate system (input and output) + const vector >& imagePoints, // projections of 3d points for every camera + const vector >& visibility, // visibility of 3d points for every camera + vector& cameraMatrix, // intrinsic matrices of all cameras (input and output) + vector& R, // rotation matrices of all cameras (input and output) + vector& T, // translation vector of all cameras (input and output) + vector& distCoeffs, // distortion coefficients of all cameras (input and output) + const TermCriteria& criteria= + TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON), + BundleAdjustCallback cb = 0, void* user_data = 0); + + public: + virtual void optimize(CvMat &_vis); //main function that runs minimization + + //iteratively asks for measurement for visible camera-point pairs + void ask_for_proj(CvMat &_vis,bool once=false); + //iteratively asks for Jacobians for every camera_point pair + void ask_for_projac(CvMat &_vis); + + CvMat* err; //error X-hX + double prevErrNorm, errNorm; + double lambda; + CvTermCriteria criteria; + int iters; + + CvMat** U; //size of array is equal to number of cameras + CvMat** V; //size of array is equal to number of points + CvMat** inv_V_star; //inverse of V* + + CvMat** A; + CvMat** B; + CvMat** W; + + CvMat* X; //measurement + CvMat* hX; //current measurement extimation given new parameter vector + + CvMat* 
prevP; //current already accepted parameter. + CvMat* P; // parameters used to evaluate function with new params + // this parameters may be rejected + + CvMat* deltaP; //computed increase of parameters (result of normal system solution ) + + CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation + // length of array is j = number of cameras + CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation + // length of array is i = number of points + + CvMat** Yj; //length of array is i = num_points + + CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params + + CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation + + CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j + + int num_cams; + int num_points; + int num_err_param; + int num_cam_param; + int num_point_param; + + //target function and jacobian pointers, which needs to be initialized + void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data); + void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data); + + void* data; + + BundleAdjustCallback cb; + void* user_data; + }; + + CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ, + CV_OUT vector >& results, CV_OUT vector& cost, + double templScale=1, int maxMatches = 20, + double minMatchDistance = 1.0, int padX = 3, + int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, + double orientationWeight = 0.5, double truncate = 20); + + + class CV_EXPORTS_W StereoVar + { + public: + // Flags + enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16}; + enum {CYCLE_O, CYCLE_V}; + enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK}; + + //! the default constructor + CV_WRAP StereoVar(); + + //! 
the full constructor taking all the necessary algorithm parameters + CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + //! the destructor + virtual ~StereoVar(); + + //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair + CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, Mat& disp); + + CV_PROP_RW int levels; + CV_PROP_RW double pyrScale; + CV_PROP_RW int nIt; + CV_PROP_RW int minDisp; + CV_PROP_RW int maxDisp; + CV_PROP_RW int poly_n; + CV_PROP_RW double poly_sigma; + CV_PROP_RW float fi; + CV_PROP_RW float lambda; + CV_PROP_RW int penalization; + CV_PROP_RW int cycle; + CV_PROP_RW int flags; + + private: + void autoParams(); + void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level); + void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + }; + + CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order); + + class CV_EXPORTS Directory + { + public: + static std::vector GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true ); + }; + + /* + * Generation of a set of different colors by the following way: + * 1) generate more then need colors (in "factor" times) in RGB, + * 2) convert them to Lab, + * 3) choose the needed count of colors from the set that are more different from + * each other, + * 4) convert the colors back to RGB + */ + CV_EXPORTS void generateColors( std::vector& colors, size_t count, size_t factor=100 ); + + + /* + * Estimate the rigid body motion 
from frame0 to frame1. The method is based on the paper + * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011. + */ + enum { ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4 + }; + CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt, + const Mat& image0, const Mat& depth0, const Mat& mask0, + const Mat& image1, const Mat& depth1, const Mat& mask1, + const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f, + const std::vector& iterCounts=std::vector(), + const std::vector& minGradientMagnitudes=std::vector(), + int transformType=RIGID_BODY_MOTION ); + + /** + *Bilinear interpolation technique. + * + *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values + *of the four nearest neighbouring Cartesian pixels to the center of the RF. + *The same principle is applied to the inverse transformation. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Interp + { + public: + + LogPolar_Interp() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. 
+ */ + LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0, + int interp=INTER_LINEAR, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Interp(); + + protected: + + Mat Rsri; + Mat Csri; + + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + int interp; + + Mat ETAyx; + Mat CSIyx; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + *Overlapping circular receptive fields technique + * + *The Cartesian plane is divided in two regions: the fovea and the periphery. + *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in + *the periphery we use the overlapping Gaussian circular RFs. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Overlapping + { + public: + LogPolar_Overlapping() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). 
+ * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Overlapping(int w, int h, Point2i center, int R=70, + double ro0=3.0, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Overlapping(); + + protected: + + Mat Rsri; + Mat Csri; + vector Rsr; + vector Csr; + vector Wsr; + + int S, R, M, N, ind1; + int top, bottom,left,right; + double ro0, romax, a, q; + + struct kernel + { + kernel() { w = 0; } + vector weights; + int w; + }; + + Mat ETAyx; + Mat CSIyx; + vector w_ker_2D; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + * Adjacent receptive fields technique + * + *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF. + *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF. + *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements + *to geometry parameters. Image Vision Comput. 
26(10) (2008) 1354-1370 + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Adjacent + { + public: + LogPolar_Adjacent() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param smin the size of the subpixel (default value 0.25 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. 
+ *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Adjacent(); + + protected: + struct pixel + { + pixel() { u = v = 0; a = 0.; } + int u; + int v; + double a; + }; + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + vector > L; + vector A; + + void subdivide_recursively(double x, double y, int i, int j, double length, double smin); + bool get_uv(double x, double y, int&u, int&v); + void create_map(int M, int N, int R, int S, double ro0, double smin); + }; + + CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src); + CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); + + class CV_EXPORTS LDA + { + public: + // Initializes a LDA with num_components (default 0) and specifies how + // samples are aligned (default dataAsRow=true). + LDA(int num_components = 0) : + _num_components(num_components) {}; + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(const Mat& src, vector labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(InputArrayOfArrays src, InputArray labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Serializes this object to a given filename. 
+ void save(const string& filename) const; + + // Deserializes this object from a given filename. + void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + void save(FileStorage& fs) const; + + // Deserializes this object from a given cv::FileStorage. + void load(const FileStorage& node); + + // Destructor. + ~LDA() {} + + //! Compute the discriminants for data in src and labels. + void compute(InputArrayOfArrays src, InputArray labels); + + // Projects samples into the LDA subspace. + Mat project(InputArray src); + + // Reconstructs projections from the LDA subspace. + Mat reconstruct(InputArray src); + + // Returns the eigenvectors of this LDA. + Mat eigenvectors() const { return _eigenvectors; }; + + // Returns the eigenvalues of this LDA. + Mat eigenvalues() const { return _eigenvalues; } + + protected: + bool _dataAsRow; + int _num_components; + Mat _eigenvectors; + Mat _eigenvalues; + + void lda(InputArrayOfArrays src, InputArray labels); + }; + + class CV_EXPORTS_W FaceRecognizer : public Algorithm + { + public: + //! virtual destructor + virtual ~FaceRecognizer() {} + + // Trains a FaceRecognizer. + CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0; + + // Updates a FaceRecognizer. + CV_WRAP void update(InputArrayOfArrays src, InputArray labels); + + // Gets a prediction from a FaceRecognizer. + virtual int predict(InputArray src) const = 0; + + // Predicts the label and confidence for a given sample. + CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0; + + // Serializes this object to a given filename. + CV_WRAP virtual void save(const string& filename) const; + + // Deserializes this object from a given filename. + CV_WRAP virtual void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + virtual void save(FileStorage& fs) const = 0; + + // Deserializes this object from a given cv::FileStorage. 
+ virtual void load(const FileStorage& fs) = 0; + + }; + + CV_EXPORTS_W Ptr createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); + CV_EXPORTS_W Ptr createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); + CV_EXPORTS_W Ptr createLBPHFaceRecognizer(int radius=1, int neighbors=8, + int grid_x=8, int grid_y=8, double threshold = DBL_MAX); + + enum + { + COLORMAP_AUTUMN = 0, + COLORMAP_BONE = 1, + COLORMAP_JET = 2, + COLORMAP_WINTER = 3, + COLORMAP_RAINBOW = 4, + COLORMAP_OCEAN = 5, + COLORMAP_SUMMER = 6, + COLORMAP_SPRING = 7, + COLORMAP_COOL = 8, + COLORMAP_HSV = 9, + COLORMAP_PINK = 10, + COLORMAP_HOT = 11 + }; + + CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap); + + CV_EXPORTS bool initModule_contrib(); +} + +#include "opencv2/contrib/retina.hpp" + +#include "opencv2/contrib/openfabmap.hpp" + +#endif + +#endif + diff --git a/OpenCV/Headers/contrib/detection_based_tracker.hpp b/OpenCV/Headers/contrib/detection_based_tracker.hpp new file mode 100644 index 0000000000..56aa1ccbed --- /dev/null +++ b/OpenCV/Headers/contrib/detection_based_tracker.hpp @@ -0,0 +1,106 @@ +#pragma once + +#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID) + +#include +#include + +#include + +class DetectionBasedTracker +{ + public: + struct Parameters + { + int minObjectSize; + int maxObjectSize; + double scaleFactor; + int maxTrackLifetime; + int minNeighbors; + int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0 + + Parameters(); + }; + + DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params); + virtual ~DetectionBasedTracker(); + + virtual bool run(); + virtual void stop(); + virtual void resetTracking(); + + virtual void process(const cv::Mat& imageGray); + + bool setParameters(const Parameters& params); + const Parameters& getParameters(); + + + typedef 
std::pair Object; + virtual void getObjects(std::vector& result) const; + virtual void getObjects(std::vector& result) const; + + protected: + class SeparateDetectionWork; + cv::Ptr separateDetectionWork; + friend void* workcycleObjectDetectorFunction(void* p); + + + struct InnerParameters + { + int numLastPositionsToTrack; + int numStepsToWaitBeforeFirstShow; + int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown; + int numStepsToShowWithoutDetecting; + + float coeffTrackingWindowSize; + float coeffObjectSizeToTrack; + float coeffObjectSpeedUsingInPrediction; + + InnerParameters(); + }; + Parameters parameters; + InnerParameters innerParameters; + + struct TrackedObject + { + typedef std::vector PositionsVector; + + PositionsVector lastPositions; + + int numDetectedFrames; + int numFramesNotDetected; + int id; + + TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0) + { + lastPositions.push_back(rect); + id=getNextId(); + }; + + static int getNextId() + { + static int _id=0; + return _id++; + } + }; + + int numTrackedSteps; + std::vector trackedObjects; + + std::vector weightsPositionsSmoothing; + std::vector weightsSizesSmoothing; + + cv::CascadeClassifier cascadeForTracking; + + + void updateTrackedObjects(const std::vector& detectedObjects); + cv::Rect calcTrackedObjectPositionToShow(int i) const; + void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector& detectedObjectsInRegions); +}; + +namespace cv +{ + using ::DetectionBasedTracker; +} //end of cv namespace + +#endif diff --git a/OpenCV/Headers/contrib/hybridtracker.hpp b/OpenCV/Headers/contrib/hybridtracker.hpp new file mode 100644 index 0000000000..3a1f722d70 --- /dev/null +++ b/OpenCV/Headers/contrib/hybridtracker.hpp @@ -0,0 +1,220 @@ +//*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_HYBRIDTRACKER_H_ +#define __OPENCV_HYBRIDTRACKER_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/core/operations.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +// Motion model for tracking algorithm. Currently supports objects that do not move much. +// To add Kalman filter +struct CV_EXPORTS CvMotionModel +{ + enum {LOW_PASS_FILTER = 0, KALMAN_FILTER = 1, EM = 2}; + + CvMotionModel() + { + } + + float low_pass_gain; // low pass gain +}; + +// Mean Shift Tracker parameters for specifying use of HSV channel and CamShift parameters. +struct CV_EXPORTS CvMeanShiftTrackerParams +{ + enum { H = 0, HS = 1, HSV = 2 }; + CvMeanShiftTrackerParams(int tracking_type = CvMeanShiftTrackerParams::HS, + CvTermCriteria term_crit = CvTermCriteria()); + + int tracking_type; + vector h_range; + vector s_range; + vector v_range; + CvTermCriteria term_crit; +}; + +// Feature tracking parameters +struct CV_EXPORTS CvFeatureTrackerParams +{ + enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 }; + CvFeatureTrackerParams(int featureType = 0, int windowSize = 0) + { + feature_type = featureType; + window_size = windowSize; + } + + int feature_type; // Feature type to use + int window_size; // Window size in pixels around which to search for new window +}; + +// Hybrid Tracking parameters for specifying weights of individual trackers and motion model. 
+struct CV_EXPORTS CvHybridTrackerParams +{ + CvHybridTrackerParams(float ft_tracker_weight = 0.5, float ms_tracker_weight = 0.5, + CvFeatureTrackerParams ft_params = CvFeatureTrackerParams(), + CvMeanShiftTrackerParams ms_params = CvMeanShiftTrackerParams(), + CvMotionModel model = CvMotionModel()); + + float ft_tracker_weight; + float ms_tracker_weight; + CvFeatureTrackerParams ft_params; + CvMeanShiftTrackerParams ms_params; + int motion_model; + float low_pass_gain; +}; + +// Performs Camshift using parameters from MeanShiftTrackerParams +class CV_EXPORTS CvMeanShiftTracker +{ +private: + Mat hsv, hue; + Mat backproj; + Mat mask, maskroi; + MatND hist; + Rect prev_trackwindow; + RotatedRect prev_trackbox; + Point2f prev_center; + +public: + CvMeanShiftTrackerParams params; + + CvMeanShiftTracker(); + explicit CvMeanShiftTracker(CvMeanShiftTrackerParams _params); + ~CvMeanShiftTracker(); + void newTrackingWindow(Mat image, Rect selection); + RotatedRect updateTrackingWindow(Mat image); + Mat getHistogramProjection(int type); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + RotatedRect getTrackingEllipse(); + Point2f getTrackingCenter(); +}; + +// Performs SIFT/SURF feature tracking using parameters from FeatureTrackerParams +class CV_EXPORTS CvFeatureTracker +{ +private: + Ptr dd; + Ptr matcher; + vector matches; + + Mat prev_image; + Mat prev_image_bw; + Rect prev_trackwindow; + Point2d prev_center; + + int ittr; + vector features[2]; + +public: + Mat disp_matches; + CvFeatureTrackerParams params; + + CvFeatureTracker(); + explicit CvFeatureTracker(CvFeatureTrackerParams params); + ~CvFeatureTracker(); + void newTrackingWindow(Mat image, Rect selection); + Rect updateTrackingWindow(Mat image); + Rect updateTrackingWindowWithSIFT(Mat image); + Rect updateTrackingWindowWithFlow(Mat image); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + Point2f getTrackingCenter(); +}; + +// Performs Hybrid Tracking and combines 
individual trackers using EM or filters +class CV_EXPORTS CvHybridTracker +{ +private: + CvMeanShiftTracker* mstracker; + CvFeatureTracker* fttracker; + + CvMat* samples; + CvMat* labels; + + Rect prev_window; + Point2f prev_center; + Mat prev_proj; + RotatedRect trackbox; + + int ittr; + Point2f curr_center; + + inline float getL2Norm(Point2f p1, Point2f p2); + Mat getDistanceProjection(Mat image, Point2f center); + Mat getGaussianProjection(Mat image, int ksize, double sigma, Point2f center); + void updateTrackerWithEM(Mat image); + void updateTrackerWithLowPassFilter(Mat image); + +public: + CvHybridTrackerParams params; + CvHybridTracker(); + explicit CvHybridTracker(CvHybridTrackerParams params); + ~CvHybridTracker(); + + void newTracker(Mat image, Rect selection); + void updateTracker(Mat image); + Rect getTrackingWindow(); +}; + +typedef CvMotionModel MotionModel; +typedef CvMeanShiftTrackerParams MeanShiftTrackerParams; +typedef CvFeatureTrackerParams FeatureTrackerParams; +typedef CvHybridTrackerParams HybridTrackerParams; +typedef CvMeanShiftTracker MeanShiftTracker; +typedef CvFeatureTracker FeatureTracker; +typedef CvHybridTracker HybridTracker; +} + +#endif + +#endif diff --git a/OpenCV/Headers/contrib/openfabmap.hpp b/OpenCV/Headers/contrib/openfabmap.hpp new file mode 100644 index 0000000000..6b2834edc8 --- /dev/null +++ b/OpenCV/Headers/contrib/openfabmap.hpp @@ -0,0 +1,405 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// This file originates from the openFABMAP project: +// [http://code.google.com/p/openfabmap/] +// +// For published work which uses all or part of OpenFABMAP, please cite: +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843] +// +// Original Algorithm by Mark Cummins and Paul Newman: +// [http://ijr.sagepub.com/content/27/6/647.short] +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942] +// [http://ijr.sagepub.com/content/30/9/1100.abstract] +// +// License Agreement +// +// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and +// Will Maddern [w.maddern@qut.edu.au], all rights reserved. +// +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENFABMAP_H_ +#define __OPENCV_OPENFABMAP_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" + +#include +#include +#include +#include +#include + +namespace cv { + +namespace of2 { + +using std::list; +using std::map; +using std::multiset; + +/* + Return data format of a FABMAP compare call +*/ +struct CV_EXPORTS IMatch { + + IMatch() : + queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) { + } + IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) : + queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match( + _match) { + } + + int queryIdx; //query index + int imgIdx; //test index + + double likelihood; //raw loglikelihood + double match; //normalised probability + + bool operator<(const IMatch& m) const { + return match < m.match; + } + +}; + +/* + Base FabMap class. Each FabMap method inherits from this class. 
+*/ +class CV_EXPORTS FabMap { +public: + + //FabMap options + enum { + MEAN_FIELD = 1, + SAMPLED = 2, + NAIVE_BAYES = 4, + CHOW_LIU = 8, + MOTION_MODEL = 16 + }; + + FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap(); + + //methods to add training data for sampling method + virtual void addTraining(const Mat& queryImgDescriptor); + virtual void addTraining(const vector& queryImgDescriptors); + + //methods to add to the test data + virtual void add(const Mat& queryImgDescriptor); + virtual void add(const vector& queryImgDescriptors); + + //accessors + const vector& getTrainingImgDescriptors() const; + const vector& getTestImgDescriptors() const; + + //Main FabMap image comparison + void compare(const Mat& queryImgDescriptor, + vector& matches, bool addQuery = false, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const Mat& testImgDescriptors, vector& matches, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + void compare(const vector& queryImgDescriptors, vector< + IMatch>& matches, bool addQuery = false, const Mat& mask = + Mat()); + void compare(const vector& queryImgDescriptors, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + +protected: + + void compareImgDescriptor(const Mat& queryImgDescriptor, + int queryIndex, const vector& testImgDescriptors, + vector& matches); + + void addImgDescriptor(const Mat& queryImgDescriptor); + + //the getLikelihoods method is overwritten for each different FabMap + //method. 
+ virtual void getLikelihoods(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches); + virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //turn likelihoods into probabilities (also add in motion model if used) + void normaliseDistribution(vector& matches); + + //Chow-Liu Tree + int pq(int q); + double Pzq(int q, bool zq); + double PzqGzpq(int q, bool zq, bool zpq); + + //FAB-MAP Core + double PzqGeq(bool zq, bool eq); + double PeqGL(int q, bool Lzq, bool eq); + double PzqGL(int q, bool zq, bool zpq, bool Lzq); + double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq); + double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq); + + //data + Mat clTree; + vector trainingImgDescriptors; + vector testImgDescriptors; + vector priorMatches; + + //parameters + double PzGe; + double PzGNe; + double Pnew; + + double mBias; + double sFactor; + + int flags; + int numSamples; + +}; + +/* + The original FAB-MAP algorithm, developed based on: + http://ijr.sagepub.com/content/27/6/647.short +*/ +class CV_EXPORTS FabMap1: public FabMap { +public: + FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap1(); +protected: + + //FabMap1 implementation of likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); +}; + +/* + A computationally faster version of the original FAB-MAP algorithm. 
A look- + up-table is used to precompute many of the reoccuring calculations +*/ +class CV_EXPORTS FabMapLUT: public FabMap { +public: + FabMapLUT(const Mat& clTree, double PzGe, double PzGNe, + int flags, int numSamples = 0, int precision = 6); + virtual ~FabMapLUT(); +protected: + + //FabMap look-up-table implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //precomputed data + int (*table)[8]; + + //data precision + int precision; +}; + +/* + The Accelerated FAB-MAP algorithm, developed based on: + http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942 +*/ +class CV_EXPORTS FabMapFBO: public FabMap { +public: + FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd = + 1e-8, int bisectionStart = 512, int bisectionIts = 9); + virtual ~FabMapFBO(); + +protected: + + //FabMap Fast Bail-out implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //stucture used to determine word comparison order + struct WordStats { + WordStats() : + q(0), info(0), V(0), M(0) { + } + + WordStats(int _q, double _info) : + q(_q), info(_info), V(0), M(0) { + } + + int q; + double info; + mutable double V; + mutable double M; + + bool operator<(const WordStats& w) const { + return info < w.info; + } + + }; + + //private fast bail-out necessary functions + void setWordStatistics(const Mat& queryImgDescriptor, multiset& wordData); + double limitbisection(double v, double m); + double bennettInequality(double v, double m, double delta); + static bool compInfo(const WordStats& first, const WordStats& second); + + //parameters + double PsGd; + double rejectionThreshold; + int bisectionStart; + int bisectionIts; +}; + +/* + The FAB-MAP2.0 algorithm, developed based on: + 
http://ijr.sagepub.com/content/30/9/1100.abstract +*/ +class CV_EXPORTS FabMap2: public FabMap { +public: + + FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags); + virtual ~FabMap2(); + + //FabMap2 builds the inverted index and requires an additional training/test + //add function + void addTraining(const Mat& queryImgDescriptors) { + FabMap::addTraining(queryImgDescriptors); + } + void addTraining(const vector& queryImgDescriptors); + + void add(const Mat& queryImgDescriptors) { + FabMap::add(queryImgDescriptors); + } + void add(const vector& queryImgDescriptors); + +protected: + + //FabMap2 implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //the likelihood function using the inverted index + void getIndexLikelihoods(const Mat& queryImgDescriptor, vector< + double>& defaults, map >& invertedMap, + vector& matches); + void addToIndex(const Mat& queryImgDescriptor, + vector& defaults, + map >& invertedMap); + + //data + vector d1, d2, d3, d4; + vector > children; + + // TODO: inverted map a vector? + + vector trainingDefaults; + map > trainingInvertedMap; + + vector testDefaults; + map > testInvertedMap; + +}; +/* + A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an + estimate of the full distribution of visual words using a minimum spanning + tree. The tree is generated through training data. 
+*/ +class CV_EXPORTS ChowLiuTree { +public: + ChowLiuTree(); + virtual ~ChowLiuTree(); + + //add data to the chow-liu tree before calling make + void add(const Mat& imgDescriptor); + void add(const vector& imgDescriptors); + + const vector& getImgDescriptors() const; + + Mat make(double infoThreshold = 0.0); + +private: + vector imgDescriptors; + Mat mergedImgDescriptors; + + typedef struct info { + float score; + short word1; + short word2; + } info; + + //probabilities extracted from mergedImgDescriptors + double P(int a, bool za); + double JP(int a, bool za, int b, bool zb); //a & b + double CP(int a, bool za, int b, bool zb); // a | b + + //calculating mutual information of all edges + void createBaseEdges(list& edges, double infoThreshold); + double calcMutInfo(int word1, int word2); + static bool sortInfoScores(const info& first, const info& second); + + //selecting minimum spanning egdges with maximum information + bool reduceEdgesToMinSpan(list& edges); + + //building the tree sctructure + Mat buildTree(int root_word, list &edges); + void recAddToTree(Mat &cltree, int q, int pq, + list &remaining_edges); + vector extractChildren(list &remaining_edges, int q); + +}; + +/* + A custom vocabulary training method based on: + http://www.springerlink.com/content/d1h6j8x552532003/ +*/ +class CV_EXPORTS BOWMSCTrainer: public BOWTrainer { +public: + BOWMSCTrainer(double clusterSize = 0.4); + virtual ~BOWMSCTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). 
+ virtual Mat cluster() const; + virtual Mat cluster(const Mat& descriptors) const; + +protected: + + double clusterSize; + +}; + +} + +} + +#endif /* OPENFABMAP_H_ */ diff --git a/OpenCV/Headers/contrib/retina.hpp b/OpenCV/Headers/contrib/retina.hpp new file mode 100644 index 0000000000..3ee291a8b7 --- /dev/null +++ b/OpenCV/Headers/contrib/retina.hpp @@ -0,0 +1,356 @@ +/*#****************************************************************************** + ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + ** + ** By downloading, copying, installing or using the software you agree to this license. + ** If you do not agree to this license, do not download, install, + ** copy or use the software. + ** + ** + ** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. + ** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. + ** + ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) + ** + ** Creation - enhancement process 2007-2011 + ** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France + ** + ** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). 
+ ** Refer to the following research paper for more information: + ** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + ** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: + ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. + ** + ** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : + ** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: + ** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 + ** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. + ** ====> more informations in the above cited Jeanny Heraults's book. + ** + ** License Agreement + ** For Open Source Computer Vision Library + ** + ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. + ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. + ** + ** For Human Visual System tools (hvstools) + ** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. + ** + ** Third party copyrights are property of their respective owners. 
+ ** + ** Redistribution and use in source and binary forms, with or without modification, + ** are permitted provided that the following conditions are met: + ** + ** * Redistributions of source code must retain the above copyright notice, + ** this list of conditions and the following disclaimer. + ** + ** * Redistributions in binary form must reproduce the above copyright notice, + ** this list of conditions and the following disclaimer in the documentation + ** and/or other materials provided with the distribution. + ** + ** * The name of the copyright holders may not be used to endorse or promote products + ** derived from this software without specific prior written permission. + ** + ** This software is provided by the copyright holders and contributors "as is" and + ** any express or implied warranties, including, but not limited to, the implied + ** warranties of merchantability and fitness for a particular purpose are disclaimed. + ** In no event shall the Intel Corporation or contributors be liable for any direct, + ** indirect, incidental, special, exemplary, or consequential damages + ** (including, but not limited to, procurement of substitute goods or services; + ** loss of use, data, or profits; or business interruption) however caused + ** and on any theory of liability, whether in contract, strict liability, + ** or tort (including negligence or otherwise) arising in any way out of + ** the use of this software, even if advised of the possibility of such damage. 
+ *******************************************************************************/ + +#ifndef __OPENCV_CONTRIB_RETINA_HPP__ +#define __OPENCV_CONTRIB_RETINA_HPP__ + +/* + * Retina.hpp + * + * Created on: Jul 19, 2011 + * Author: Alexandre Benoit + */ + +#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support +#include + +namespace cv +{ + +enum RETINA_COLORSAMPLINGMETHOD +{ + RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice + RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR... + RETINA_COLOR_BAYER//!< standard bayer sampling +}; + +class RetinaFilter; + +/** + * @class Retina a wrapper class which allows the Gipsa/Listic Labs model to be used. + * This retina model allows spatio-temporal image processing (applied on still images, video sequences). + * As a summary, these are the retina model properties: + * => It applies a spectral whithening (mid-frequency details enhancement) + * => high frequency spatio-temporal noise reduction + * => low frequency luminance to be reduced (luminance range compression) + * => local logarithmic luminance compression allows details to be enhanced in low light conditions + * + * USE : this model can be used basically for spatio-temporal video effects but also for : + * _using the getParvo method output matrix : texture analysiswith enhanced signal to noise ratio and enhanced details robust against input images luminance ranges + * _using the getMagno method output matrix : motion analysis also with the previously cited properties + * + * for more information, reer to the following papers : + * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 
758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. + * + * The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : + * _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: + * ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 + * _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. + * ====> more informations in the above cited Jeanny Heraults's book. 
+ */ +class CV_EXPORTS Retina { + +public: + + // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel + struct RetinaParameters{ + struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters + OPLandIplParvoParameters():colorMode(true), + normaliseOutput(true), + photoreceptorsLocalAdaptationSensitivity(0.7f), + photoreceptorsTemporalConstant(0.5f), + photoreceptorsSpatialConstant(0.53f), + horizontalCellsGain(0.0f), + hcellsTemporalConstant(1.f), + hcellsSpatialConstant(7.f), + ganglionCellsSensitivity(0.7f){};// default setup + bool colorMode, normaliseOutput; + float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity; + }; + struct IplMagnoParameters{ // Inner Plexiform Layer Magnocellular channel (IplMagno) + IplMagnoParameters(): + normaliseOutput(true), + parasolCells_beta(0.f), + parasolCells_tau(0.f), + parasolCells_k(7.f), + amacrinCellsTemporalCutFrequency(1.2f), + V0CompressionParameter(0.95f), + localAdaptintegration_tau(0.f), + localAdaptintegration_k(7.f){};// default setup + bool normaliseOutput; + float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k; + }; + struct OPLandIplParvoParameters OPLandIplParvo; + struct IplMagnoParameters IplMagno; + }; + + /** + * Main constructor with most commun use setup : create an instance of color ready retina model + * @param inputSize : the input frame size + */ + Retina(Size inputSize); + + /** + * Complete Retina filter constructor which allows all basic structural parameters definition + * @param inputSize : the input frame size + * @param colorMode : the chosen processing mode : with or without color 
processing + * @param colorSamplingMethod: specifies which kind of color sampling will be used + * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used + * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak + * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied + */ + Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); + + virtual ~Retina(); + + /** + * retreive retina input buffer size + */ + Size inputSize(); + + /** + * retreive retina output buffer size + */ + Size outputSize(); + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param retinaParameterFile : the parameters filename + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true); + + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param fs : the open Filestorage which contains retina parameters + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true); + + /** + * try to open an XML retina 
parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param newParameters : a parameters structures updated with the new target configuration + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(RetinaParameters newParameters); + + /** + * @return the current parameters setup + */ + struct Retina::RetinaParameters getParameters(); + + /** + * parameters setup display method + * @return a string which contains formatted parameters information + */ + const std::string printSetup(); + + /** + * write xml/yml formated parameters information + * @rparam fs : the filename of the xml file that will be open and writen with formatted parameters information + */ + virtual void write( std::string fs ) const; + + + /** + * write xml/yml formated parameters information + * @param fs : a cv::Filestorage object ready to be filled + */ + virtual void write( FileStorage& fs ) const; + + /** + * setup the OPL and IPL parvo channels (see biologocal model) + * OPL is referred as Outer Plexiform Layer of the retina, it allows the spatio-temporal filtering which withens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy) + * IPL parvo is the OPL next processing stage, it refers to Inner Plexiform layer of the retina, it allows high contours sensitivity in foveal vision. + * for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 
758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + * @param colorMode : specifies if (true) color is processed of not (false) to then processing gray level image + * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false) + * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity renage is 0-1 (more log compression effect when value increases) + * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame + * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel + * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then, the luminance is not filtered and is still reachable at the output, typicall value is 0 + * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as the photoreceptors + * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixel, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model) + * @param ganglionCellsSensitivity: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... 
and the output saturates faster, recommended value: 230 + */ + void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7, const float photoreceptorsTemporalConstant=0.5, const float photoreceptorsSpatialConstant=0.53, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7); + + /** + * set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel + * this channel processes signals outpint from OPL processing stage in peripheral vision, it allows motion information enhancement. It is decorrelated from the details channel. See reference paper for more details. + * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false) + * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0 + * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response) + * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5 + * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass fiter of the magnocellular way (motion information channel), unit is frames, tipicall value is 5 + * @param V0CompressionParameter: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... 
and the output saturates faster, recommended value: 200 + * @param localAdaptintegration_tau: specifies the temporal constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation + * @param localAdaptintegration_k: specifies the spatial constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation + */ + void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2, const float V0CompressionParameter=0.95, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7); + + /** + * method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated acccessors, see getParvo and getMagno methods + * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits) + */ + void run(const Mat &inputImage); + + /** + * accessor of the details channel of the retina (models foveal vision) + * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV + */ + void getParvo(Mat &retinaOutput_parvo); + + /** + * accessor of the details channel of the retina (models foveal vision) + * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling + */ + void getParvo(std::valarray &retinaOutput_parvo); + + /** + * accessor of the motion channel of the retina (models peripheral vision) + * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV + */ + void getMagno(Mat 
&retinaOutput_magno); + + /** + * accessor of the motion channel of the retina (models peripheral vision) + * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling + */ + void getMagno(std::valarray &retinaOutput_magno); + + // original API level data accessors : get buffers addresses... + const std::valarray & getMagno() const; + const std::valarray & getParvo() const; + + /** + * activate color saturation as the final step of the color demultiplexing process + * -> this saturation is a sigmoide function applied to each channel of the demultiplexed image. + * @param saturateColors: boolean that activates color saturation (if true) or desactivate (if false) + * @param colorSaturationValue: the saturation factor + */ + void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0); + + /** + * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o) + */ + void clearBuffers(); + + /** + * Activate/desactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated + * @param activate: true if Magnocellular output should be activated, false if not + */ + void activateMovingContoursProcessing(const bool activate); + + /** + * Activate/desactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated + * @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not + */ + void activateContoursProcessing(const bool activate); + +protected: + // Parameteres setup members + RetinaParameters _retinaParameters; // structure of parameters + + // Retina model related modules + std::valarray _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays) + + // pointer to retina model + RetinaFilter* _retinaFilter; //!< 
the pointer to the retina module, allocated with instance construction + + /** + * exports a valarray buffer outing from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format + * @param grayMatrixToConvert the valarray to export to OpenCV + * @param nbRows : the number of rows of the valarray flatten matrix + * @param nbColumns : the number of rows of the valarray flatten matrix + * @param colorMode : a flag which mentions if matrix is color (true) or graylevel (false) + * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions + */ + void _convertValarrayBuffer2cvMat(const std::valarray &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer); + + /** + * + * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to gray or RGB valarray buffer that will be processed by the retina model + * @param outputValarrayMatrix : the output valarray + * @return the input image color mode (color=true, gray levels=false) + */ + bool _convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray &outputValarrayMatrix); + + //! private method called by constructors, gathers their parameters and use them in a unified way + void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); + + +}; + +} +#endif /* __OPENCV_CONTRIB_RETINA_HPP__ */ + diff --git a/OpenCV/Headers/core/core.hpp b/OpenCV/Headers/core/core.hpp new file mode 100644 index 0000000000..561d26b3a4 --- /dev/null +++ b/OpenCV/Headers/core/core.hpp @@ -0,0 +1,4738 @@ +/*! \file core.hpp + \brief The Core Functionality + */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "opencv2/core/types_c.h" +#include "opencv2/core/version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class CV_EXPORTS Size_; +template class CV_EXPORTS Point_; +template class CV_EXPORTS Rect_; +template class CV_EXPORTS Vec; +template class CV_EXPORTS Matx; + +typedef std::string String; + +class Mat; +class SparseMat; +typedef Mat MatND; + +class GlBuffer; +class GlTexture; +class GlArrays; +class GlCamera; + +namespace gpu { + class GpuMat; +} + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class CV_EXPORTS Mat_; +template class CV_EXPORTS MatIterator_; +template class CV_EXPORTS MatConstIterator_; +template class CV_EXPORTS MatCommaInitializer_; + +#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T) +typedef std::basic_string WString; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); +#endif + +CV_EXPORTS string format( const char* fmt, ... 
); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 }; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. + */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports __func__ macro + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. 
+ */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. + + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. + \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + +#ifdef __GNUC__ +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, __func__, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, __func__, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, __func__, __FILE__, __LINE__) ) +#else +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, "", __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, "", __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, "", __FILE__, __LINE__) ) +#endif + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS int getNumThreads(); +CV_EXPORTS int getThreadNum(); + +CV_EXPORTS_W const string& getBuildInformation(); + +//! 
Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... + exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + + \note {Note that the function output is not static. Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! 
returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. +*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! 
+ Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! + The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class CV_EXPORTS Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. 
+*/ +template class CV_EXPORTS DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+ */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class CV_EXPORTS Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, (m < n ? m : n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! 
dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! convertion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + diag_type diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+*/ +template class CV_EXPORTS Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + //! conjugation (makes sense for complex numbers and quaternions) + Vec conj() const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! convertion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class CV_EXPORTS Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +/*! + \typedef +*/ +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. 
There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class CV_EXPORTS Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! cross-product + double cross(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class CV_EXPORTS Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! 
cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class CV_EXPORTS Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class CV_EXPORTS Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! 
checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +/*! + \typedef + + shorter aliases for the most popular cv::Point_<>, cv::Size_<> and cv::Rect_<> specializations +*/ +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& center, const Size2f& size, float angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> +{ +public: + //! 
various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = 
CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Matx<_Tp, m, n> value_type; + typedef Matx::work_type, m, n> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = m*n, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> 
value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + 
enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! + Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. + To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class CV_EXPORTS Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + template Ptr(const Ptr<_Tp2>& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. 
Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + //! cast pointer to another type + template Ptr<_Tp2> ptr(); + template const Ptr<_Tp2> ptr() const; + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + + _Tp* obj; //< the object pointer. + int* refcount; //< the associated reference counter +}; + + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { + KIND_SHIFT = 16, + FIXED_TYPE = 0x8000 << KIND_SHIFT, + FIXED_SIZE = 0x4000 << KIND_SHIFT, + KIND_MASK = ~(FIXED_TYPE|FIXED_SIZE) - (1 << KIND_SHIFT) + 1, + + NONE = 0 << KIND_SHIFT, + MAT = 1 << KIND_SHIFT, + MATX = 2 << KIND_SHIFT, + STD_VECTOR = 3 << KIND_SHIFT, + STD_VECTOR_VECTOR = 4 << KIND_SHIFT, + STD_VECTOR_MAT = 5 << KIND_SHIFT, + EXPR = 6 << KIND_SHIFT, + OPENGL_BUFFER = 7 << KIND_SHIFT, + OPENGL_TEXTURE = 8 << KIND_SHIFT, + GPU_MAT = 9 << KIND_SHIFT + }; + _InputArray(); + + _InputArray(const Mat& m); + _InputArray(const MatExpr& expr); + template _InputArray(const _Tp* vec, int n); + template _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const vector >& vec); + template _InputArray(const Mat_<_Tp>& m); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + _InputArray(const GlBuffer& buf); + _InputArray(const GlTexture& tex); + _InputArray(const gpu::GpuMat& d_mat); + + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + virtual GlBuffer getGlBuffer() const; + virtual GlTexture getGlTexture() const; + virtual gpu::GpuMat getGpuMat() const; + + virtual int kind() const; + 
virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int channels(int i=-1) const; + virtual bool empty() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_InputArray(); +#endif + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(vector >& vec); + template _OutputArray(Mat_<_Tp>& m); + template _OutputArray(Matx<_Tp, m, n>& matx); + template _OutputArray(_Tp* vec, int n); + _OutputArray(gpu::GpuMat& d_mat); + + _OutputArray(const Mat& m); + template _OutputArray(const vector<_Tp>& vec); + template _OutputArray(const vector >& vec); + _OutputArray(const vector& vec); + template _OutputArray(const vector >& vec); + template _OutputArray(const Mat_<_Tp>& m); + template _OutputArray(const Matx<_Tp, m, n>& matx); + template _OutputArray(const _Tp* vec, int n); + _OutputArray(const gpu::GpuMat& d_mat); + + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + /*virtual*/ gpu::GpuMat& getGpuMatRef() const; + virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, 
bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_OutputArray(); +#endif +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. + It is very similar to CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism. + + There are many different ways to create cv::Mat object. Here are the some popular ones: +
    +
  • using cv::Mat::create(nrows, ncols, type) method or + the similar constructor cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor. + A new matrix of the specified size and specifed type will be allocated. + "type" has the same meaning as in cvCreateMat function, + e.g. CV_8UC1 means 8-bit single-channel matrix, CV_32FC2 means 2-channel (i.e. complex) + floating-point matrix etc: + + \code + // make 7x7 complex matrix filled with 1+3j. + cv::Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M to 100x60 15-channel 8-bit matrix. + // The old content will be deallocated + M.create(100,60,CV_8UC(15)); + \endcode + + As noted in the introduction of this chapter, Mat::create() + will only allocate a new matrix when the current matrix dimensionality + or type are different from the specified. + +
  • by using a copy constructor or assignment operator, where on the right side it can + be a matrix or expression, see below. Again, as noted in the introduction, + matrix assignment is O(1) operation because it only copies the header + and increases the reference counter. cv::Mat::clone() method can be used to get a full + (a.k.a. deep) copy of the matrix when you need it. + +
  • by constructing a header for a part of another matrix. It can be a single row, single column, + several rows, several columns, rectangular region in the matrix (called a minor in algebra) or + a diagonal. Such operations are also O(1), because the new header will reference the same data. + You can actually modify a part of the matrix using this feature, e.g. + + \code + // add 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + + // now copy 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + + // create new 320x240 image + cv::Mat img(Size(320,240),CV_8UC3); + // select a roi + cv::Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); + \endcode + + Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to + compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI(): + + \code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C ~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) + \endcode + + As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method + of the extracted sub-matrices. + +
  • by making a header for user-allocated-data. It can be useful for +
      +
    1. processing "foreign" data using OpenCV (e.g. when you implement + a DirectShow filter or a processing module for gstreamer etc.), e.g. + + \code + void process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + cv::Mat img(height, width, CV_8UC3, pixels, step); + cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5); + } + \endcode + +
    2. for quick initialization of small matrices and/or super-fast element access + + \code + double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}}; + cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv(); + \endcode +
    + + partial yet very common cases of this "user-allocated data" case are conversions + from CvMat and IplImage to cv::Mat. For this purpose there are special constructors + taking pointers to CvMat or IplImage and the optional + flag indicating whether to copy the data or not. + + Backward conversion from cv::Mat to CvMat or IplImage is provided via cast operators + cv::Mat::operator CvMat() an cv::Mat::operator IplImage(). + The operators do not copy the data. + + + \code + IplImage* img = cvLoadImage("greatwave.jpg", 1); + Mat mtx(img); // convert IplImage* -> cv::Mat + CvMat oldmat = mtx; // convert cv::Mat -> CvMat + CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height && + oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep); + \endcode + +
  • by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.: + + \code + // create a double-precision identity martix and add it to M. + M += Mat::eye(M.rows, M.cols, CV_64F); + \endcode + +
  • by using comma-separated initializer: + + \code + // create 3x3 double-precision identity matrix + Mat M = (Mat_(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); + \endcode + + here we first call constructor of cv::Mat_ class (that we describe further) with the proper matrix, + and then we just put "<<" operator followed by comma-separated values that can be constants, + variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors. + +
+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + \image html roi.png + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). 
+ if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! 
constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int rows, int cols, int type); + Mat(Size size, int type); + //! constucts 2D matrix and fills it with the specified value _s. + Mat(int rows, int cols, int type, const Scalar& s); + Mat(Size size, int type, const Scalar& s); + + //! constructs n-dimensional matrix + Mat(int ndims, const int* sizes, int type); + Mat(int ndims, const int* sizes, int type, const Scalar& s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP); + Mat(Size size, int type, void* data, size_t step=AUTO_STEP); + Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! 
builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + + //! download data from GpuMat + explicit Mat(const gpu::GpuMat& m); + + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! 
creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + Mat reshape(int cn, int rows=0) const; + Mat reshape(int cn, int newndims, const int* newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + void create(int ndims, const int* sizes, int type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! 
resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! 
returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! 
the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! 
custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; + +protected: + void initEmpty(); +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator ()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange=false ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + + +/*! 
+ Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int _type, int _maxCount, double _epsilon); + //! conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion from CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +typedef void (*BinaryFunc)(const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, + void*); + +CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz); + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! 
subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! returns the list of locations of non-zero pixels +CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx ); + +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! 
computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); + +//! naive nearest neighbor finder +CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2, + OutputArray dist, int dtype, OutputArray nidx, + int normType=NORM_L2, int K=0, + InputArray mask=noArray(), int update=0, + bool crosscheck=false); + +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +CV_EXPORTS void merge(const vector& mv, OutputArray dst ); + +//! 
makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +CV_EXPORTS void split(const Mat& m, vector& mv ); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv); + +//! copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst); + +//! 
computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! 
computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); + +CV_EXPORTS void exp(const float* src, float* dst, int n); +CV_EXPORTS void log(const float* src, float* dst, int n); +CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees); +CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n); + +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. 
+CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pos=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! converts NaN's to the given number +CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0); + +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double gamma, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! 
sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. 
+ Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. + + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA(InputArray data, InputArray mean, int flags, double retainedVariance); + //! operator that performs PCA. 
The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA& computeVar(InputArray data, InputArray mean, int flags, double retainedVariance); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAComputeVar(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, double retainedVariance); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. 
If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! 
performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! 
clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! 
draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. 
+*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int lineType=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. 
But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. + Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. + Note that cv::Mat::at<_Tp>(int y, int x) and cv::Mat_<_Tp>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class CV_EXPORTS Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! 
constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! 
iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! 
returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! 
conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! 
shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class CV_EXPORTS MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! 
shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class CV_EXPORTS MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! 
decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class CV_EXPORTS MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class CV_EXPORTS MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class CV_EXPORTS MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class CV_EXPORTS VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. 
If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class CV_EXPORTS AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. 
+ + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). + // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. + for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! 
the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. 
"Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. It's user responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it's filled enough, + so that the search time remains O(1) in average. Elements can be accessed using the following methods: + +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find, for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators and unlike cv::Mat iterators, the sparse matrix iterators are STL-style, + that is, the iteration is done as following: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_ + it = sparse_mat.begin(), + it_end = sparse_mat.end(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const Node* n = it.node(); + printf("(") + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that elements are enumerated + in no any logical order (lexicographical etc.), + they come in the same order as they stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. + +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute unnormalized + cross-correlation of the 2 floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains less elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_ it = _a->begin(), + it_end = _a->end(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const Node* anode = it.node(); + // and try to find element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse hashvalue stored in the node + float bvalue = _b->value(anode->idx,&anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode +
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + \param try1d if true and m is a single-column matrix (Nx1), + then the sparse matrix will be 1-dimensional. + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! 
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! 
computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialized with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent to + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! 
returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (const _Tp*)ptr(i0,...,false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! 
returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse matrix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse matrix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! 
the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. +*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) 
+ SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class CV_EXPORTS SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivalent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. 
All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. 
+*/ +template class CV_EXPORTS SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class CV_EXPORTS SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. 
Lowe BBF (Best-Bin-First) algorithm for the fast + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. + The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! 
finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogeneous array, whose elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, whose elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. 
+ // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2, //! append mode + MEMORY=4, + FORMAT_MASK=(7<<3), + FORMAT_AUTO=0, + FORMAT_XML=(1<<3), + FORMAT_YAML=(2<<3) + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& source, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. 
+ CV_WRAP virtual bool isOpened() const; + //! closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + //! closes the file, releases all the memory buffers and returns the text string + CV_WRAP string releaseAndGetString(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. 
+ When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym for REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! 
returns true if the node has a name + CV_WRAP bool isNamed() const; + //! returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! 
moves iterator to the next node + FileNodeIterator& operator ++ (); + //! moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int ofs); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int ofs); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. no constructors or destructors + are called for the sequence elements. +*/ +template class CV_EXPORTS Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! 
returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! 
removes the first element from the sequence + void pop_front(); + //! removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class CV_EXPORTS SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! 
moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +class CV_EXPORTS Algorithm; +class CV_EXPORTS AlgorithmInfo; +struct CV_EXPORTS AlgorithmInfoData; + +template struct ParamType {}; + +/*! + Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS_W Algorithm +{ +public: + Algorithm(); + virtual ~Algorithm(); + string name() const; + + template typename ParamType<_Tp>::member_type get(const string& name) const; + template typename ParamType<_Tp>::member_type get(const char* name) const; + + CV_WRAP int getInt(const string& name) const; + CV_WRAP double getDouble(const string& name) const; + CV_WRAP bool getBool(const string& name) const; + CV_WRAP string getString(const string& name) const; + CV_WRAP Mat getMat(const string& name) const; + CV_WRAP vector getMatVector(const string& name) const; + CV_WRAP Ptr getAlgorithm(const string& name) const; + + void set(const string& name, int value); + void set(const string& name, double value); + void set(const string& name, bool value); + void set(const string& name, const string& value); + void set(const string& name, const Mat& value); + void set(const string& name, const vector& value); + void set(const string& name, const Ptr& value); + template void set(const string& name, const Ptr<_Tp>& value); + + CV_WRAP void setInt(const string& name, int value); + CV_WRAP void setDouble(const string& name, double value); + CV_WRAP void setBool(const string& name, bool value); + CV_WRAP void setString(const string& name, const string& value); + CV_WRAP void setMat(const string& name, const Mat& value); + CV_WRAP void setMatVector(const string& name, const vector& value); + CV_WRAP void setAlgorithm(const string& name, const Ptr& value); + template void setAlgorithm(const string& name, const Ptr<_Tp>& value); + + void set(const char* name, 
int value); + void set(const char* name, double value); + void set(const char* name, bool value); + void set(const char* name, const string& value); + void set(const char* name, const Mat& value); + void set(const char* name, const vector& value); + void set(const char* name, const Ptr& value); + template void set(const char* name, const Ptr<_Tp>& value); + + void setInt(const char* name, int value); + void setDouble(const char* name, double value); + void setBool(const char* name, bool value); + void setString(const char* name, const string& value); + void setMat(const char* name, const Mat& value); + void setMatVector(const char* name, const vector& value); + void setAlgorithm(const char* name, const Ptr& value); + template void setAlgorithm(const char* name, const Ptr<_Tp>& value); + + CV_WRAP string paramHelp(const string& name) const; + int paramType(const char* name) const; + CV_WRAP int paramType(const string& name) const; + CV_WRAP void getParams(CV_OUT vector& names) const; + + + virtual void write(FileStorage& fs) const; + virtual void read(const FileNode& fn); + + typedef Algorithm* (*Constructor)(void); + typedef int (Algorithm::*Getter)() const; + typedef void (Algorithm::*Setter)(int); + + CV_WRAP static void getList(CV_OUT vector& algorithms); + CV_WRAP static Ptr _create(const string& name); + template static Ptr<_Tp> create(const string& name); + + virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; } +}; + + +class CV_EXPORTS AlgorithmInfo +{ +public: + friend class Algorithm; + AlgorithmInfo(const string& name, Algorithm::Constructor create); + ~AlgorithmInfo(); + void get(const Algorithm* algo, const char* name, int argType, void* value) const; + void addParam_(Algorithm& algo, const char* name, int argType, + void* value, bool readOnly, + Algorithm::Getter getter, Algorithm::Setter setter, + const string& help=string()); + string paramHelp(const char* name) const; + int paramType(const char* name) const; + void 
getParams(vector& names) const; + + void write(const Algorithm* algo, FileStorage& fs) const; + void read(Algorithm* algo, const FileNode& fn) const; + string name() const; + + void addParam(Algorithm& algo, const char* name, + int& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + short& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + bool& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + double& value, bool readOnly=false, + double (Algorithm::*getter)()=0, + void (Algorithm::*setter)(double)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + string& value, bool readOnly=false, + string (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const string&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Mat& value, bool readOnly=false, + Mat (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Mat&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + vector& value, bool readOnly=false, + vector (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const vector&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Ptr& value, bool readOnly=false, + Ptr (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr&)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + 
Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); +protected: + AlgorithmInfoData* data; + void set(Algorithm* algo, const char* name, int argType, + const void* value, bool force=false) const; +}; + + +struct CV_EXPORTS Param +{ + enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, SHORT=10 }; + + Param(); + Param(int _type, bool _readonly, int _offset, + Algorithm::Getter _getter=0, + Algorithm::Setter _setter=0, + const string& _help=string()); + int type; + int offset; + bool readonly; + Algorithm::Getter getter; + Algorithm::Setter setter; + string help; +}; + +template<> struct ParamType +{ + typedef bool const_param_type; + typedef bool member_type; + + enum { type = Param::BOOLEAN }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::INT }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::SHORT }; +}; + +template<> struct ParamType +{ + typedef double const_param_type; + typedef double member_type; + + enum { type = Param::REAL }; +}; + +template<> struct ParamType +{ + typedef const string& const_param_type; + typedef string member_type; + + enum { type = Param::STRING }; +}; + +template<> struct ParamType +{ + typedef const Mat& const_param_type; + typedef Mat member_type; + + enum { type = Param::MAT }; +}; + +template<> struct ParamType > +{ + typedef const vector& const_param_type; + typedef vector member_type; + + enum { type = Param::MAT_VECTOR }; +}; + +template<> struct ParamType +{ + typedef const Ptr& const_param_type; + typedef Ptr member_type; + + enum { type = Param::ALGORITHM }; +}; + +template<> struct ParamType +{ + typedef float const_param_type; + typedef float member_type; + + enum { type = Param::FLOAT }; +}; + +template<> struct 
ParamType +{ + typedef unsigned const_param_type; + typedef unsigned member_type; + + enum { type = Param::UNSIGNED_INT }; +}; + +template<> struct ParamType +{ + typedef uint64 const_param_type; + typedef uint64 member_type; + + enum { type = Param::UINT64 }; +}; + + +/*! +"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! 
the default constructor + CommandLineParser(int argc, const char* const argv[], const char* key_map); + + //! get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analyzeValue<_Tp>(str, space_delete); + } + + //! print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analyzeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res; + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +uint64 CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + + +/////////////////////////////// Parallel Primitives ////////////////////////////////// + +// a base body class +class CV_EXPORTS ParallelLoopBody +{ +public: + virtual ~ParallelLoopBody(); + virtual void operator() (const Range& range) const = 0; +}; + +CV_EXPORTS void parallel_for_(const Range& range, 
const ParallelLoopBody& body, double nstripes=-1.); + +/////////////////////////// Synchronization Primitives /////////////////////////////// + +class CV_EXPORTS Mutex +{ +public: + Mutex(); + ~Mutex(); + Mutex(const Mutex& m); + Mutex& operator = (const Mutex& m); + + void lock(); + bool trylock(); + void unlock(); + + struct Impl; +protected: + Impl* impl; +}; + +class CV_EXPORTS AutoLock +{ +public: + AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); } + ~AutoLock() { mutex->unlock(); } +protected: + Mutex* mutex; +}; + +} + +#endif // __cplusplus + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/OpenCV/Headers/core/core_c.h b/OpenCV/Headers/core/core_c.h new file mode 100644 index 0000000000..df763ab9a4 --- /dev/null +++ b/OpenCV/Headers/core/core_c.h @@ -0,0 +1,1885 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. 
+ Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initalizes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + 
(reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). 
*/ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. + (diag = 0 means the main diagonal, >0 means a diagonal above the main one, + <0 - below the main one). + The diagonal will be represented as a column (nx1 matrix). */ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/* low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/* Allocates and initializes CvMatND header */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/* Allocates and initializes CvMatND header and allocates data */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/* Initializes preallocated CvMatND header */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/* Releases CvMatND */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/* Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/* Allocates and initializes CvSparseMat header and allocates data */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/* Releases CvSparseMat */ 
+CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/* Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/* Initializes sparse array iterator + (returns the first node or NULL if the array is empty) */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +// returns next sparse array node (or NULL if there is no more nodes) +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + +/**************** matrix iterator: used for n-ary operations on dense arrays *********/ + +#define CV_MAX_ARR 10 + +typedef struct CvNArrayIterator +{ + int count; /* number of arrays */ + int dims; /* number of dimensions to iterate */ + CvSize size; /* maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */ + int stack[CV_MAX_DIM]; /* for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/* initializes iterator that traverses through several arrays simulteneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/* returns zero value if iteration is finished, non-zero (slice length) otherwise */ 
+CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/* Returns type of array elements: + CV_8UC1 ... CV_64FC4 ... */ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. (y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) 
*/ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) = value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. 
+ new_cn == 0 means that number of channels remains unchanged. + new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. + If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. 
+ In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( 
-value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) */ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ 
value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar 
coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). + Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). 
+ Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source 
array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) 
= (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc 
CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* 
Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. 
+ A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size, + size_t elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. 
The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns 
pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, (CvSetElem**)&elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx ) +{ + CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, idx ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int 
vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. 
*/ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. 
thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? 
-1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. 
Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* 
module_info ); + +/* Loads optimized functions from IPP, MKL etc. or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory managment functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or 
creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? 
(double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const 
CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + 
+/********************************** Error Handling **************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrives current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additonal actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occured */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const 
char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. 
Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/OpenCV/Headers/core/cuda_devptrs.hpp b/OpenCV/Headers/core/cuda_devptrs.hpp new file mode 100644 index 0000000000..373ff290b2 --- /dev/null +++ b/OpenCV/Headers/core/cuda_devptrs.hpp @@ -0,0 +1,185 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_DEVPTRS_HPP__ +#define __OPENCV_CORE_DEVPTRS_HPP__ + +#ifdef __cplusplus + +#ifdef __CUDACC__ + #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __CV_GPU_HOST_DEVICE__ +#endif + +namespace cv +{ + namespace gpu + { + // Simple lightweight structures that encapsulates information about an image on device. + // It is intended to pass to nvcc-compiled code. 
GpuMat depends on headers that nvcc can't compile + + template struct StaticAssert; + template <> struct StaticAssert {static __CV_GPU_HOST_DEVICE__ void check(){}}; + + template struct DevPtr + { + typedef T elem_type; + typedef int index_type; + + enum { elem_size = sizeof(elem_type) }; + + T* data; + + __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {} + __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {} + + __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; } + __CV_GPU_HOST_DEVICE__ operator T*() { return data; } + __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; } + }; + + template struct PtrSz : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {} + __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr(data_), size(size_) {} + + size_t size; + }; + + template struct PtrStep : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {} + __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr(data_), step(step_) {} + + /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! 
*/ + size_t step; + + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr::data + y * step); } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr::data + y * step); } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStepSz : public PtrStep + { + __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {} + __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_) + : PtrStep(data_, step_), cols(cols_), rows(rows_) {} + + template + explicit PtrStepSz(const PtrStepSz& d) : PtrStep((T*)d.data, d.step), cols(d.cols), rows(d.rows){} + + int cols; + int rows; + }; + + typedef PtrStepSz PtrStepSzb; + typedef PtrStepSz PtrStepSzf; + typedef PtrStepSz PtrStepSzi; + + typedef PtrStep PtrStepb; + typedef PtrStep PtrStepf; + typedef PtrStep PtrStepi; + + +#if defined __GNUC__ + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated)) +#elif defined(__MSVC__) //|| defined(__CUDACC__) + #pragma deprecated(DevMem2D_) + #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated) + #define __CV_GPU_DEPR_AFTER__ +#else + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ +#endif + + template struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz + { + DevMem2D_() {} + DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz(rows_, cols_, data_, step_) {} + + template + explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_& d) : PtrStepSz(d.rows, d.cols, (T*)d.data, d.step) {} + } __CV_GPU_DEPR_AFTER__ ; + + typedef DevMem2D_ DevMem2Db; + typedef DevMem2Db DevMem2D; + typedef DevMem2D_ DevMem2Df; + typedef DevMem2D_ DevMem2Di; + + template struct PtrElemStep_ : public PtrStep + { + PtrElemStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) + { + StaticAssert<256 % sizeof(T) == 0>::check(); + + 
PtrStep::step /= PtrStep::elem_size; + } + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep::data + y * PtrStep::step; } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep::data + y * PtrStep::step; } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStep_ : public PtrStep + { + PtrStep_() {} + PtrStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) {} + }; + + typedef PtrElemStep_ PtrElemStep; + typedef PtrElemStep_ PtrElemStepf; + typedef PtrElemStep_ PtrElemStepi; + +//#undef __CV_GPU_DEPR_BEFORE__ +//#undef __CV_GPU_DEPR_AFTER__ + + } +} + +#endif // __cplusplus + +#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */ diff --git a/OpenCV/Headers/core/devmem2d.hpp b/OpenCV/Headers/core/devmem2d.hpp new file mode 100644 index 0000000000..fc2d2f2757 --- /dev/null +++ b/OpenCV/Headers/core/devmem2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/cuda_devptrs.hpp" diff --git a/OpenCV/Headers/core/eigen.hpp b/OpenCV/Headers/core/eigen.hpp new file mode 100644 index 0000000000..751734eb5a --- /dev/null +++ b/OpenCV/Headers/core/eigen.hpp @@ -0,0 +1,281 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" + +#if defined _MSC_VER && _MSC_VER >= 1200 +#pragma warning( disable: 4714 ) //__forceinline is not inlined +#pragma warning( disable: 4127 ) //conditional expression is constant +#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data +#endif + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), 
(size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(_rows, _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + 
else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, 1>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + dst.resize(_rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(1, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +//Matx +template +void cv2eigen( const Matx<_Tp, 1, _cols>& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + dst.resize(_cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(1, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + 
Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +} + +#endif + +#endif + diff --git a/OpenCV/Headers/core/gpumat.hpp b/OpenCV/Headers/core/gpumat.hpp new file mode 100644 index 0000000000..dffb344a6b --- /dev/null +++ b/OpenCV/Headers/core/gpumat.hpp @@ -0,0 +1,577 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPUMAT_HPP__ +#define __OPENCV_GPUMAT_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/core/cuda_devptrs.hpp" + +namespace cv { namespace gpu +{ + //////////////////////////////// Initialization & Info //////////////////////// + + //! This is the only function that do not throw exceptions if the library is compiled without Cuda. + CV_EXPORTS int getCudaEnabledDeviceCount(); + + //! Functions below throw cv::Expception if the library is compiled without Cuda. + + CV_EXPORTS void setDevice(int device); + CV_EXPORTS int getDevice(); + + //! Explicitly destroys and cleans up all resources associated with the current device in the current process. + //! Any subsequent API call to this device will reinitialize the device. 
+ CV_EXPORTS void resetDevice(); + + enum FeatureSet + { + FEATURE_SET_COMPUTE_10 = 10, + FEATURE_SET_COMPUTE_11 = 11, + FEATURE_SET_COMPUTE_12 = 12, + FEATURE_SET_COMPUTE_13 = 13, + FEATURE_SET_COMPUTE_20 = 20, + FEATURE_SET_COMPUTE_21 = 21, + FEATURE_SET_COMPUTE_30 = 30, + GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11, + SHARED_ATOMICS = FEATURE_SET_COMPUTE_12, + NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13, + WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30 + }; + + // Gives information about what GPU archs this OpenCV GPU module was + // compiled for + class CV_EXPORTS TargetArchs + { + public: + static bool builtWith(FeatureSet feature_set); + static bool has(int major, int minor); + static bool hasPtx(int major, int minor); + static bool hasBin(int major, int minor); + static bool hasEqualOrLessPtx(int major, int minor); + static bool hasEqualOrGreater(int major, int minor); + static bool hasEqualOrGreaterPtx(int major, int minor); + static bool hasEqualOrGreaterBin(int major, int minor); + private: + TargetArchs(); + }; + + // Gives information about the given GPU + class CV_EXPORTS DeviceInfo + { + public: + // Creates DeviceInfo object for the current GPU + DeviceInfo() : device_id_(getDevice()) { query(); } + + // Creates DeviceInfo object for the given GPU + DeviceInfo(int device_id) : device_id_(device_id) { query(); } + + std::string name() const { return name_; } + + // Return compute capability versions + int majorVersion() const { return majorVersion_; } + int minorVersion() const { return minorVersion_; } + + int multiProcessorCount() const { return multi_processor_count_; } + + size_t freeMemory() const; + size_t totalMemory() const; + + // Checks whether device supports the given feature + bool supports(FeatureSet feature_set) const; + + // Checks whether the GPU module can be run on the given device + bool isCompatible() const; + + int deviceID() const { return device_id_; } + + private: + void query(); + void queryMemory(size_t& free_memory, size_t& 
total_memory) const; + + int device_id_; + + std::string name_; + int multi_processor_count_; + int majorVersion_; + int minorVersion_; + }; + + CV_EXPORTS void printCudaDeviceInfo(int device); + CV_EXPORTS void printShortCudaDeviceInfo(int device); + + //////////////////////////////// GpuMat /////////////////////////////// + + //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat. + class CV_EXPORTS GpuMat + { + public: + //! default constructor + GpuMat(); + + //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + GpuMat(int rows, int cols, int type); + GpuMat(Size size, int type); + + //! constucts GpuMatrix and fills it with the specified value _s. + GpuMat(int rows, int cols, int type, Scalar s); + GpuMat(Size size, int type, Scalar s); + + //! copy constructor + GpuMat(const GpuMat& m); + + //! constructor for GpuMatrix headers pointing to user-allocated data + GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP); + GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP); + + //! creates a matrix header for a part of the bigger matrix + GpuMat(const GpuMat& m, Range rowRange, Range colRange); + GpuMat(const GpuMat& m, Rect roi); + + //! builds GpuMat from Mat. Perfom blocking upload to device. + explicit GpuMat(const Mat& m); + + //! destructor - calls release() + ~GpuMat(); + + //! assignment operators + GpuMat& operator = (const GpuMat& m); + + //! pefroms blocking upload data to GpuMat. + void upload(const Mat& m); + + //! downloads data from device to host memory. Blocking calls. + void download(Mat& m) const; + + //! returns a new GpuMatrix header for the specified row + GpuMat row(int y) const; + //! returns a new GpuMatrix header for the specified column + GpuMat col(int x) const; + //! ... for the specified row span + GpuMat rowRange(int startrow, int endrow) const; + GpuMat rowRange(Range r) const; + //! ... 
for the specified column span + GpuMat colRange(int startcol, int endcol) const; + GpuMat colRange(Range r) const; + + //! returns deep copy of the GpuMatrix, i.e. the data is copied + GpuMat clone() const; + //! copies the GpuMatrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo(GpuMat& m) const; + //! copies those GpuMatrix elements to "m" that are marked with non-zero mask elements. + void copyTo(GpuMat& m, const GpuMat& mask) const; + //! converts GpuMatrix to another datatype with optional scalng. See cvConvertScale. + void convertTo(GpuMat& m, int rtype, double alpha = 1, double beta = 0) const; + + void assignTo(GpuMat& m, int type=-1) const; + + //! sets every GpuMatrix element to s + GpuMat& operator = (Scalar s); + //! sets some of the GpuMatrix elements to s, according to the mask + GpuMat& setTo(Scalar s, const GpuMat& mask = GpuMat()); + //! creates alternative GpuMatrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + GpuMat reshape(int cn, int rows = 0) const; + + //! allocates new GpuMatrix data unless the GpuMatrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + //! decreases reference counter; + // deallocate the data when reference counter reaches 0. + void release(); + + //! swaps with other smart pointer + void swap(GpuMat& mat); + + //! locates GpuMatrix header within a parent GpuMatrix. See below + void locateROI(Size& wholeSize, Point& ofs) const; + //! moves/resizes the current GpuMatrix ROI inside the parent GpuMatrix. + GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright); + //! extracts a rectangular sub-GpuMatrix + // (this is a generalized form of row, rowRange etc.) + GpuMat operator()(Range rowRange, Range colRange) const; + GpuMat operator()(Rect roi) const; + + //! 
returns true iff the GpuMatrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_GpuMat_CONT(cvGpuMat->type) + bool isContinuous() const; + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvMat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvMat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvMat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvMat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1() const; + //! returns GpuMatrix size: + // width == number of columns, height == number of rows + Size size() const; + //! returns true if GpuMatrix data is NULL + bool empty() const; + + //! returns pointer to y-th row + uchar* ptr(int y = 0); + const uchar* ptr(int y = 0) const; + + //! template version of the above method + template _Tp* ptr(int y = 0); + template const _Tp* ptr(int y = 0) const; + + template operator PtrStepSz<_Tp>() const; + template operator PtrStep<_Tp>() const; + + // Deprecated function + __CV_GPU_DEPR_BEFORE__ template operator DevMem2D_<_Tp>() const __CV_GPU_DEPR_AFTER__; + __CV_GPU_DEPR_BEFORE__ template operator PtrStep_<_Tp>() const __CV_GPU_DEPR_AFTER__; + #undef __CV_GPU_DEPR_BEFORE__ + #undef __CV_GPU_DEPR_AFTER__ + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + + //! the number of rows and columns + int rows, cols; + + //! a distance between successive rows in bytes; includes the gap if any + size_t step; + + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when GpuMatrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + }; + + //! 
Creates continuous GPU matrix + CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(int rows, int cols, int type); + CV_EXPORTS void createContinuous(Size size, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(Size size, int type); + + //! Ensures that size of the given matrix is not less than (rows, cols) size + //! and matrix type is match specified one too + CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS void ensureSizeIsEnough(Size size, int type, GpuMat& m); + + CV_EXPORTS GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat &mat); + + //////////////////////////////////////////////////////////////////////// + // Error handling + + CV_EXPORTS void error(const char* error_string, const char* file, const int line, const char* func = ""); + + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + + inline GpuMat::GpuMat() + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + create(rows_, cols_, type_); + } + + inline GpuMat::GpuMat(Size size_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + create(size_.height, size_.width, type_); + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + { + create(rows_, cols_, type_); + setTo(s_); + } + } + + inline GpuMat::GpuMat(Size size_, int type_, Scalar s_) + : flags(0), rows(0), 
cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + { + create(size_.height, size_.width, type_); + setTo(s_); + } + } + + inline GpuMat::~GpuMat() + { + release(); + } + + inline GpuMat GpuMat::clone() const + { + GpuMat m; + copyTo(m); + return m; + } + + inline void GpuMat::assignTo(GpuMat& m, int _type) const + { + if (_type < 0) + m = *this; + else + convertTo(m, _type); + } + + inline size_t GpuMat::step1() const + { + return step / elemSize1(); + } + + inline bool GpuMat::empty() const + { + return data == 0; + } + + template inline _Tp* GpuMat::ptr(int y) + { + return (_Tp*)ptr(y); + } + + template inline const _Tp* GpuMat::ptr(int y) const + { + return (const _Tp*)ptr(y); + } + + inline void swap(GpuMat& a, GpuMat& b) + { + a.swap(b); + } + + inline GpuMat GpuMat::row(int y) const + { + return GpuMat(*this, Range(y, y+1), Range::all()); + } + + inline GpuMat GpuMat::col(int x) const + { + return GpuMat(*this, Range::all(), Range(x, x+1)); + } + + inline GpuMat GpuMat::rowRange(int startrow, int endrow) const + { + return GpuMat(*this, Range(startrow, endrow), Range::all()); + } + + inline GpuMat GpuMat::rowRange(Range r) const + { + return GpuMat(*this, r, Range::all()); + } + + inline GpuMat GpuMat::colRange(int startcol, int endcol) const + { + return GpuMat(*this, Range::all(), Range(startcol, endcol)); + } + + inline GpuMat GpuMat::colRange(Range r) const + { + return GpuMat(*this, Range::all(), r); + } + + inline void GpuMat::create(Size size_, int type_) + { + create(size_.height, size_.width, type_); + } + + inline GpuMat GpuMat::operator()(Range _rowRange, Range _colRange) const + { + return GpuMat(*this, _rowRange, _colRange); + } + + inline GpuMat GpuMat::operator()(Rect roi) const + { + return GpuMat(*this, roi); + } + + inline bool GpuMat::isContinuous() const + { + return (flags & Mat::CONTINUOUS_FLAG) != 0; + } + + inline size_t GpuMat::elemSize() const + { + return 
CV_ELEM_SIZE(flags); + } + + inline size_t GpuMat::elemSize1() const + { + return CV_ELEM_SIZE1(flags); + } + + inline int GpuMat::type() const + { + return CV_MAT_TYPE(flags); + } + + inline int GpuMat::depth() const + { + return CV_MAT_DEPTH(flags); + } + + inline int GpuMat::channels() const + { + return CV_MAT_CN(flags); + } + + inline Size GpuMat::size() const + { + return Size(cols, rows); + } + + inline uchar* GpuMat::ptr(int y) + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline const uchar* GpuMat::ptr(int y) const + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline GpuMat& GpuMat::operator = (Scalar s) + { + setTo(s); + return *this; + } + + template inline GpuMat::operator PtrStepSz() const + { + return PtrStepSz(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep() const + { + return PtrStep((T*)data, step); + } + + template inline GpuMat::operator DevMem2D_() const + { + return DevMem2D_(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep_() const + { + return PtrStep_(static_cast< DevMem2D_ >(*this)); + } + + inline GpuMat createContinuous(int rows, int cols, int type) + { + GpuMat m; + createContinuous(rows, cols, type, m); + return m; + } + + inline void createContinuous(Size size, int type, GpuMat& m) + { + createContinuous(size.height, size.width, type, m); + } + + inline GpuMat createContinuous(Size size, int type) + { + GpuMat m; + createContinuous(size, type, m); + return m; + } + + inline void ensureSizeIsEnough(Size size, int type, GpuMat& m) + { + ensureSizeIsEnough(size.height, size.width, type, m); + } + + inline void createContinuous(int rows, int cols, int type, GpuMat& m) + { + int area = rows * cols; + if (!m.isContinuous() || m.type() != type || m.size().area() != area) + ensureSizeIsEnough(1, area, type, m); + m = m.reshape(0, rows); + } + + inline void ensureSizeIsEnough(int rows, int cols, int type, 
GpuMat& m) + { + if (m.type() == type && m.rows >= rows && m.cols >= cols) + m = m(Rect(0, 0, cols, rows)); + else + m.create(rows, cols, type); + } + + inline GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat &mat) + { + if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols) + return mat(Rect(0, 0, cols, rows)); + return mat = GpuMat(rows, cols, type); + } +}} + +#endif // __cplusplus + +#endif // __OPENCV_GPUMAT_HPP__ diff --git a/OpenCV/Headers/core/internal.hpp b/OpenCV/Headers/core/internal.hpp new file mode 100644 index 0000000000..93e56c3ab3 --- /dev/null +++ b/OpenCV/Headers/core/internal.hpp @@ -0,0 +1,788 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. + It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. 
+*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if !defined WIN32 && !defined WINCE +# include +#endif + +#ifdef __BORLANDC__ +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +# define CV_DLL +# undef _CV_ALWAYS_PROFILE_ +# define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +# define FALSE 0 +#endif +#ifndef TRUE +# define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +# include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} +#endif + +#ifndef IPPI_CALL +# define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) +# include "emmintrin.h" +# define CV_SSE 1 +# define CV_SSE2 1 +# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "pmmintrin.h" +# define CV_SSE3 1 +# endif +# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "tmmintrin.h" +# define CV_SSSE3 1 +# endif +# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_1 1 +# endif +# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_2 1 +# endif +# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +// MS Visual Studio 2010 (2012?) 
has no macro pre-defined to identify the use of /arch:AVX +// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 +# include +# define CV_AVX 1 +# if defined(_XCR_XFEATURE_ENABLED_MASK) +# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK) +# else +# define __xgetbv() 0 +# endif +# endif +#endif + +#ifdef __ARM_NEON__ +# include +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#endif + +#ifndef CV_SSE +# define CV_SSE 0 +#endif +#ifndef CV_SSE2 +# define CV_SSE2 0 +#endif +#ifndef CV_SSE3 +# define CV_SSE3 0 +#endif +#ifndef CV_SSSE3 +# define CV_SSSE3 0 +#endif +#ifndef CV_SSE4_1 +# define CV_SSE4_1 0 +#endif +#ifndef CV_SSE4_2 +# define CV_SSE4_2 0 +#endif +#ifndef CV_AVX +# define CV_AVX 0 +#endif +#ifndef CV_NEON +# define CV_NEON 0 +#endif + +#ifdef HAVE_TBB +# include "tbb/tbb_stddef.h" +# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 +# include "tbb/tbb.h" +# include "tbb/task.h" +# undef min +# undef max +# else +# undef HAVE_TBB +# endif +#endif + +#ifdef HAVE_EIGEN +# if defined __GNUC__ && defined __APPLE__ +# pragma GCC diagnostic ignored "-Wshadow" +# endif +# include +# include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +namespace cv +{ +#ifdef HAVE_TBB + + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + typedef tbb::concurrent_vector ConcurrentDoubleVector; +#else + class BlockedRange + { + public: + BlockedRange() : 
_begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; + typedef std::vector ConcurrentDoubleVector; + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } +#endif +} //namespace cv + +#define CV_INIT_ALGORITHM(classname, algname, memberinit) \ + static ::cv::Algorithm* create##classname() \ + { \ + return new classname; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info() \ + { \ + static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \ + return classname##_info_var; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \ + \ + ::cv::AlgorithmInfo* classname::info() const \ + { \ + static volatile bool initialized = false; \ + \ + if( !initialized ) \ + { \ + initialized = true; \ + classname obj; \ + memberinit; \ + } \ + return &classname##_info(); \ + } + +#endif //__cplusplus + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. 
*/ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. */ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ +# define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else +# define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +/* get alloca declaration */ +#ifdef __GNUC__ +# undef alloca +# define alloca __builtin_alloca +# define CV_HAVE_ALLOCA 1 +#elif defined WIN32 || defined _WIN32 || \ + defined WINCE || defined _MSC_VER || defined __BORLANDC__ +# include +# define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA_H +# include +# define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA +# include +# define CV_HAVE_ALLOCA 1 +#else +# undef CV_HAVE_ALLOCA +#endif + +#ifdef __GNUC__ +# define CV_DECL_ALIGNED(x) __attribute__ ((aligned 
(x))) +#elif defined _MSC_VER +# define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +# define CV_DECL_ALIGNED(x) +#endif + +#if CV_HAVE_ALLOCA +/* ! DO NOT make it an inline function */ +# define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN ) +#endif + +#ifndef CV_IMPL +# define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 
32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. 
+ + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + 
CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#ifdef __cplusplus +//! 
OpenGL extension table +class CV_EXPORTS CvOpenGlFuncTab +{ +public: + virtual ~CvOpenGlFuncTab(); + + virtual void genBuffers(int n, unsigned int* buffers) const = 0; + virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0; + + virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0; + virtual void bufferSubData(unsigned int target, ptrdiff_t offset, ptrdiff_t size, const void* data) const = 0; + + virtual void bindBuffer(unsigned int target, unsigned int buffer) const = 0; + + virtual void* mapBuffer(unsigned int target, unsigned int access) const = 0; + virtual void unmapBuffer(unsigned int target) const = 0; + + virtual void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const = 0; + + virtual bool isGlContextInitialized() const = 0; +}; + +CV_EXPORTS void icvSetOpenGlFuncTab(const CvOpenGlFuncTab* tab); + +CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* func = ""); + +#if defined(__GNUC__) + #define CV_CheckGlError() CV_DbgAssert( (::icvCheckGlError(__FILE__, __LINE__, __func__)) ) +#else + #define CV_CheckGlError() CV_DbgAssert( (::icvCheckGlError(__FILE__, __LINE__)) ) +#endif + +#endif //__cplusplus + +#endif // __OPENCV_CORE_INTERNAL_HPP__ diff --git a/OpenCV/Headers/core/mat.hpp b/OpenCV/Headers/core/mat.hpp new file mode 100644 index 0000000000..92301cf3b4 --- /dev/null +++ b/OpenCV/Headers/core/mat.hpp @@ -0,0 +1,2605 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline void Mat::initEmpty() +{ + flags = MAGIC_VAL; + dims = rows = cols = 0; + data = datastart = dataend = datalimit = 0; + refcount = 0; + allocator = 0; +} + +inline Mat::Mat() : size(&rows) +{ + initEmpty(); +} + +inline Mat::Mat(int _rows, int _cols, int _type) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) : size(&rows) +{ + initEmpty(); + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + 
size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, 
DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( 
m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } +inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 || d.rows == 1 ); + int len = d.rows + d.cols - 1; + Mat m(len, len, d.type(), Scalar(0)), md = m.diag(); + if( d.cols == 1 ) + d.copyTo(md); + else + transpose(d, md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + size.p[0] = 0; + 
refcount = 0; +} + +inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const +{ + return Mat(*this, _rowRange, _colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return (size_t)rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) ); + 
return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < 
(unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +inline const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < 
(unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((_Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(_Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((_Tp*)(data + step.p[0]*i))[j]; +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((const _Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(const _Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((const _Tp*)(data + step.p[0]*i))[j]; +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} +template _Tp& Mat::at(const Vec& idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx.val); +} +template inline const _Tp& Mat::at(const Vec& idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx.val); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const 
Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& 
Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + Mat mtx = m.getMat(); + SVD svd(mtx, (mtx.rows >= mtx.cols ? 
0 : SVD::FULL_UV)); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : 
Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange) + : Mat(m, _rowRange, _colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( 
DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int 
Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const +{ return Mat_<_Tp>(*this, _rowRange, _colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + 
+template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template template inline _Tp& Mat_<_Tp>::operator ()(const Vec& idx) +{ + return Mat::at<_Tp>(idx); +} + +template template inline const _Tp& Mat_<_Tp>::operator ()(const Vec& idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Matx::channel_type, m, n>(); +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, 
Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +template inline _InputArray::_InputArray(const _Tp* vec, int n) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _InputArray::_InputArray(const Mat_<_Tp>& m) + : flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Mat_<_Tp>& m) + : _InputArray(m) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(_Tp* vec, int n) + : _InputArray(vec, n) {} + +template inline _OutputArray::_OutputArray(const vector<_Tp>& vec) 
+ : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} + +template inline _OutputArray::_OutputArray(const Mat_<_Tp>& m) + : _InputArray(m) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(const _Tp* vec, int n) + : _InputArray(vec, n) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, 
double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& 
a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator 
< (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const 
Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const 
MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const 
Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations /////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& 
b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers 
////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < 
sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() 
+{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; 
+} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += 
ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline 
MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + +//////////////////////////////// SparseMat //////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, 
_type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const 
int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? *p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline 
SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? 
+ (const SparseMat::Node*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + 
*this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* 
hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline 
+SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + SparseMatIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/OpenCV/Headers/core/opengl_interop.hpp b/OpenCV/Headers/core/opengl_interop.hpp new file mode 100644 index 0000000000..24aa546a43 --- /dev/null +++ b/OpenCV/Headers/core/opengl_interop.hpp @@ -0,0 +1,335 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENGL_INTEROP_HPP__ +#define __OPENCV_OPENGL_INTEROP_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" + +namespace cv +{ +//! Smart pointer for OpenGL buffer memory with reference counting. +class CV_EXPORTS GlBuffer +{ +public: + enum Usage + { + ARRAY_BUFFER = 0x8892, // buffer will use for OpenGL arrays (vertices, colors, normals, etc) + TEXTURE_BUFFER = 0x88EC // buffer will ise for OpenGL textures + }; + + //! 
create empty buffer + explicit GlBuffer(Usage usage); + + //! create buffer + GlBuffer(int rows, int cols, int type, Usage usage); + GlBuffer(Size size, int type, Usage usage); + + //! copy from host/device memory + GlBuffer(InputArray mat, Usage usage); + + void create(int rows, int cols, int type, Usage usage); + void create(Size size, int type, Usage usage); + void create(int rows, int cols, int type); + void create(Size size, int type); + + void release(); + + //! copy from host/device memory + void copyFrom(InputArray mat); + + void bind() const; + void unbind() const; + + //! map to host memory + Mat mapHost(); + void unmapHost(); + + //! map to device memory + gpu::GpuMat mapDevice(); + void unmapDevice(); + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + inline Usage usage() const { return usage_; } + + class Impl; +private: + int rows_; + int cols_; + int type_; + Usage usage_; + + Ptr impl_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! Smart pointer for OpenGL 2d texture memory with reference counting. +class CV_EXPORTS GlTexture +{ +public: + //! create empty texture + GlTexture(); + + //! create texture + GlTexture(int rows, int cols, int type); + GlTexture(Size size, int type); + + //! copy from host/device memory + explicit GlTexture(InputArray mat, bool bgra = true); + + void create(int rows, int cols, int type); + void create(Size size, int type); + void release(); + + //! 
copy from host/device memory + void copyFrom(InputArray mat, bool bgra = true); + + void bind() const; + void unbind() const; + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + class Impl; +private: + int rows_; + int cols_; + int type_; + + Ptr impl_; + GlBuffer buf_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! OpenGL Arrays +class CV_EXPORTS GlArrays +{ +public: + inline GlArrays() + : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), bgra_(true), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER) + { + } + + void setVertexArray(InputArray vertex); + inline void resetVertexArray() { vertex_.release(); } + + void setColorArray(InputArray color, bool bgra = true); + inline void resetColorArray() { color_.release(); } + + void setNormalArray(InputArray normal); + inline void resetNormalArray() { normal_.release(); } + + void setTexCoordArray(InputArray texCoord); + inline void resetTexCoordArray() { texCoord_.release(); } + + void bind() const; + void unbind() const; + + inline int rows() const { return vertex_.rows(); } + inline int cols() const { return vertex_.cols(); } + inline Size size() const { return vertex_.size(); } + inline bool empty() const { return vertex_.empty(); } + +private: + GlBuffer vertex_; + GlBuffer color_; + bool bgra_; + GlBuffer normal_; + GlBuffer texCoord_; +}; + +//! 
OpenGL Font +class CV_EXPORTS GlFont +{ +public: + enum Weight + { + WEIGHT_LIGHT = 300, + WEIGHT_NORMAL = 400, + WEIGHT_SEMIBOLD = 600, + WEIGHT_BOLD = 700, + WEIGHT_BLACK = 900 + }; + + enum Style + { + STYLE_NORMAL = 0, + STYLE_ITALIC = 1, + STYLE_UNDERLINE = 2 + }; + + static Ptr get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL); + + void draw(const char* str, int len) const; + + inline const std::string& family() const { return family_; } + inline int height() const { return height_; } + inline Weight weight() const { return weight_; } + inline Style style() const { return style_; } + +private: + GlFont(const std::string& family, int height, Weight weight, Style style); + + std::string family_; + int height_; + Weight weight_; + Style style_; + + unsigned int base_; + + GlFont(const GlFont&); + GlFont& operator =(const GlFont&); +}; + +//! render functions + +//! render texture rectangle in window +CV_EXPORTS void render(const GlTexture& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +//! render mode +namespace RenderMode { + enum { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 + }; +} + +//! render OpenGL arrays +CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255)); + +CV_EXPORTS void render(const std::string& str, const Ptr& font, Scalar color, Point2d pos); + +//! 
OpenGL camera +class CV_EXPORTS GlCamera +{ +public: + GlCamera(); + + void lookAt(Point3d eye, Point3d center, Point3d up); + void setCameraPos(Point3d pos, double yaw, double pitch, double roll); + + void setScale(Point3d scale); + + void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true); + void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar); + void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar); + + void setupProjectionMatrix() const; + void setupModelViewMatrix() const; + +private: + Point3d eye_; + Point3d center_; + Point3d up_; + + Point3d pos_; + double yaw_; + double pitch_; + double roll_; + + bool useLookAtParams_; + + Point3d scale_; + + Mat projectionMatrix_; + + double fov_; + double aspect_; + + double left_; + double right_; + double bottom_; + double top_; + + double zNear_; + double zFar_; + + bool perspectiveProjection_; +}; + +inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); } +inline void GlBuffer::create(int _rows, int _cols, int _type) { create(_rows, _cols, _type, usage()); } +inline void GlBuffer::create(Size _size, int _type) { create(_size.height, _size.width, _type, usage()); } +inline void GlTexture::create(Size _size, int _type) { create(_size.height, _size.width, _type); } + +namespace gpu +{ + //! set a CUDA device to use OpenGL interoperability + CV_EXPORTS void setGlDevice(int device = 0); +} +} // namespace cv + +#endif // __cplusplus + +#endif // __OPENCV_OPENGL_INTEROP_HPP__ diff --git a/OpenCV/Headers/core/operations.hpp b/OpenCV/Headers/core/operations.hpp new file mode 100644 index 0000000000..4b6e606165 --- /dev/null +++ b/OpenCV/Headers/core/operations.hpp @@ -0,0 +1,3977 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the linux version of the Intel(tm) compiler + #define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast(reinterpret_cast(addr)), delta) +#elif defined __GNUC__ + + #if defined __clang__ && __clang_major__ >= 3 + #ifdef __ATOMIC_SEQ_CST + #define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), (delta), __ATOMIC_SEQ_CST) + #else + #define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), (delta), 5) + #endif + #elif __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !defined WIN32 && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 || defined WINCE + namespace cv { CV_EXPORTS int _interlockedExchangeAdd(int* addr, int delta); } + #define CV_XADD cv::_interlockedExchangeAdd + +#else + static inline int CV_XADD(int* addr, int delta) + { int tmp = *addr; *addr += delta; return tmp; } +#endif + +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable:4127) //conditional expression is constant +#endif + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline 
_Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) +{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) +{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) +{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline schar saturate_cast(uchar v) +{ return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) +{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) +{ + return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? + v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); +} +template<> inline schar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) +{ return (schar)std::min(v, (unsigned)SCHAR_MAX); } + +template<> inline schar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline ushort saturate_cast(schar v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) +{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) +{ return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline short saturate_cast(ushort v) +{ return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) +{ + return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? + v : v > 0 ? SHRT_MAX : SHRT_MIN); +} +template<> inline short saturate_cast(unsigned v) +{ return (short)std::min(v, (unsigned)SHRT_MAX); } +template<> inline short saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. 
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + +inline int fast_abs(uchar v) { return v; } +inline int fast_abs(schar v) { return std::abs((int)v); } +inline int fast_abs(ushort v) { return v; } +inline int fast_abs(short v) { return std::abs((int)v); } +inline int fast_abs(int v) { return std::abs(v); } +inline float fast_abs(float v) { return std::abs(v); } +inline double fast_abs(double v) { return std::abs(v); } + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + 
assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int 
i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + 
return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v.val[i] = val[i*n + j]; + return v; +} + + +template inline +typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + 
for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static 
inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b) +{ + Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp()); + return reinterpret_cast&>(c); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 2, 1> tmp = a*Vec<_Tp,2>(b.x, b.y); + return Point_<_Tp>(tmp.val[0], tmp.val[1]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 
1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + Matx c(Matx(a), b, Matx_MatMulOp()); + return reinterpret_cast(c); +} + + +static inline +Scalar operator * (const Matx& a, const Scalar& b) +{ + Matx c(a, b, Matx_MatMulOp()); + return reinterpret_cast(c); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct CV_EXPORTS Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m, m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return p; + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return 
Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct CV_EXPORTS Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for( int i = 0; i < m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = (invert(A, B, method) != 0); + } + return ok ? 
b : Matx<_Tp, n, m>::zeros(); +} + + +template struct CV_EXPORTS Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? 
x : Matx<_Tp, n, l>::zeros(); +} + +template inline +Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const +{ + Matx<_Tp, n, 1> x = solve(reinterpret_cast&>(rhs), method); + return reinterpret_cast&>(x); +} + +template static inline +_AccTp normL2Sqr(const _Tp* a, int n) +{ + _AccTp s = 0; + int i=0; + #if CV_ENABLE_UNROLLED + for( ; i <= n - 4; i += 4 ) + { + _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3]; + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = a[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, int n) +{ + _AccTp s = 0; + int i = 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + s += (_AccTp)fast_abs(a[i]) + (_AccTp)fast_abs(a[i+1]) + + (_AccTp)fast_abs(a[i+2]) + (_AccTp)fast_abs(a[i+3]); + } +#endif + for( ; i < n; i++ ) + s += fast_abs(a[i]); + return s; +} + + +template static inline +_AccTp normInf(const _Tp* a, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + s = std::max(s, (_AccTp)fast_abs(a[i])); + return s; +} + + +template static inline +_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += v*v; + } + return s; +} + +CV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n); +CV_EXPORTS float normL1_(const float* a, const float* b, int n); +CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize); + +template<> inline float normL2Sqr(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL2Sqr_(a, b, n); + float 
s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3); + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += std::abs(v); + } + return s; +} + +template<> inline float normL1(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL1_(a, b, n); + float s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += std::abs(v); + } + return s; +} + +template<> inline int normL1(const uchar* a, const uchar* b, int n) +{ + return normL1_(a, b, n); +} + +template static inline +_AccTp normInf(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + { + _AccTp v0 = a[i] - b[i]; + s = std::max(s, std::abs(v0)); + } + return s; +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n)); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + return normType == NORM_INF ? (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + normType == NORM_L1 ? 
(double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n)); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + 
_Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template Vec<_Tp, 2> conjugate(const Vec<_Tp, 2>& v) +{ + return Vec<_Tp, 2>(v[0], -v[1]); +} + +template Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v) +{ + return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline 
Vec Vec::conj() const +{ + return conjugate(*this); +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) 
+ a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha) +{ + float ialpha = 1.f/alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, 
Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]), + saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]), + saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]), + saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0])); +} + +template inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + v1 = v1 * v2; + return v1; +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template inline Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v) +{ + double nv = norm(v); + return v * (nv ? 
1./nv : 0.); +} + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return *this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= 
b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + 
return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } +template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template inline double Point_<_Tp>::cross(const Point_& pt) const +{ return (double)x*pt.y - (double)y*pt.x; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + 
b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + +template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> 
operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x = pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return 
(double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& 
a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} +template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline 
Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp 
_y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + +template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) 
+{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + 
+template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = 
saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + 
+template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const 
Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} +inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start 
+ delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class CV_EXPORTS Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct CV_EXPORTS Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? 
Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? &vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && 
CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i = 0, n = v1.size(); + 
assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? 
a : (int)(next()%(b - a) + a); } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? -1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, 
fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr) +{ + obj = _ptr.obj; + refcount = _ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr) +{ + int* _refcount = _ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = _ptr.obj; + refcount = _refcount; + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +template template Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p) + : obj(0), refcount(0) +{ + if (p.empty()) + return; + + _Tp* p_casted = dynamic_cast<_Tp*>(p.obj); + if (!p_casted) + return; + + obj = p_casted; + refcount = p.refcount; + addref(); +} + +template template inline Ptr<_Tp2> Ptr<_Tp>::ptr() +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj 
= obj_casted; + p.refcount = refcount; + return p; +} + +template template inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj = obj_casted; + p.refcount = refcount; + return p; +} + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + 
write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void 
write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.start); + write(fs, r.end); +} + +template class CV_EXPORTS VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class CV_EXPORTS VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), !vec.empty() ? (uchar*)&vec[0] : 0, vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + +template static inline void write( FileStorage& fs, const string& name, + const vector<_Tp>& vec ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? 
CV_NODE_FLOW : 0)); + write(fs, vec); +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? (size_t)((CvSet*)node->data.map)->active_count : + t == SEQ ? 
(size_t)node->data.seq->total : (size_t)!isNone(); +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? 
string(node.node->data.str.ptr) : string(""); +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class CV_EXPORTS VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class CV_EXPORTS VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), !vec.empty() ? 
(uchar*)&vec[0] : 0, count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( const FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + if(!node.node) + vec = default_value; + else + { + FileNodeIterator it = node.begin(); + read( it, vec ); + } +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} 
+ +static inline bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? 
(LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class CV_EXPORTS LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class CV_EXPORTS GreaterEq +{ +public: + 
bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class CV_EXPORTS LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class CV_EXPORTS GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. +// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the 
path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template 
inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t 
count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd) +{ + cvStartReadSeq(_seq.seq, this); + index = seekEnd ? 
_seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( std::abs(static_cast(delta)) > n ) + delta += delta < 0 ? 
n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct CV_EXPORTS RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + static void* dummyp = &dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = dummyp; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* fmt; + vector params; +}; + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return Formatted(mtx, Formatter::get(fmt), params); 
+} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +/** Writes a Matx to an output stream. + */ +template inline std::ostream& operator<<(std::ostream& out, const Matx<_Tp, m, n>& matx) +{ + out << cv::Mat(matx); + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +/** Writes a Vec to an output stream. 
Format example : [10, 20, 30] + */ +template inline std::ostream& operator<<(std::ostream& out, const Vec<_Tp, n>& vec) +{ + out << "["; + for (int i = 0; i < n - 1; ++i) { + out << vec[i] << ", "; + } + out << vec[n-1] << "]"; + + return out; +} + +/** Writes a Size_ to an output stream. Format example : [640 x 480] + */ +template inline std::ostream& operator<<(std::ostream& out, const Size_<_Tp>& size) +{ + out << "[" << size.width << " x " << size.height << "]"; + return out; +} + +/** Writes a Rect_ to an output stream. Format example : [640 x 480 from (10, 20)] + */ +template inline std::ostream& operator<<(std::ostream& out, const Rect_<_Tp>& rect) +{ + out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]"; + return out; +} + + +template inline Ptr<_Tp> Algorithm::create(const string& name) +{ + return _create(name).ptr<_Tp>(); +} + +template +inline void Algorithm::set(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::set(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template +inline void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. 
template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::setAlgorithm(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const string& _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value); + return value; +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name, ParamType<_Tp>::type, &value); + return value; +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from _Base + addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from Algorithm + addParam_(algo, parameter, ParamType::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +} + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#endif // __cplusplus +#endif diff --git a/OpenCV/Headers/core/types_c.h b/OpenCV/Headers/core/types_c.h new file mode 100644 index 0000000000..cbc7872e61 --- /dev/null +++ b/OpenCV/Headers/core/types_c.h @@ -0,0 +1,1901 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER +# if _MSC_VER > 1300 +# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +# endif +#endif + + +#ifndef SKIP_INCLUDES + +#include +#include +#include +#include + +#if !defined _MSC_VER && !defined __BORLANDC__ +# include +#endif + +#if defined __ICL +# define CV_ICC __ICL +#elif defined __ICC +# define CV_ICC __ICC +#elif defined __ECL +# define CV_ICC __ECL +#elif defined __ECC +# define CV_ICC __ECC +#elif defined __INTEL_COMPILER +# define CV_ICC __INTEL_COMPILER +#endif + +#if defined CV_ICC && !defined CV_ENABLE_UNROLLED +# define CV_ENABLE_UNROLLED 0 +#else +# define CV_ENABLE_UNROLLED 1 +#endif + +#if (defined _M_X64 && defined _MSC_VER && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__) +# if defined WIN32 +# include +# endif +# if defined __SSE2__ || !defined __GNUC__ +# include +# endif +#endif + +#if defined __BORLANDC__ +# include +#else +# include +#endif + +#ifdef HAVE_IPL +# ifndef __IPL_H__ +# if defined WIN32 || defined _WIN32 +# include +# else +# include +# endif +# endif +#elif defined __IPL_H__ +# define HAVE_IPL +#endif + +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 +# define CV_CDECL __cdecl +# define CV_STDCALL __stdcall +#else +# define CV_CDECL +# define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C +# ifdef 
__cplusplus +# define CV_EXTERN_C extern "C" +# define CV_DEFAULT(val) = val +# else +# define CV_EXTERN_C +# define CV_DEFAULT(val) +# endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR +# ifdef __cplusplus +# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } +# else +# define CV_EXTERN_C_FUNCPTR(x) typedef x +# endif +#endif + +#ifndef CV_INLINE +# if defined __cplusplus +# define CV_INLINE inline +# elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__ +# define CV_INLINE __inline +# else +# define CV_INLINE static +# endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +# define CV_EXPORTS __declspec(dllexport) +#else +# define CV_EXPORTS +#endif + +#ifndef CVAPI +# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ + typedef __int64 int64; + typedef unsigned __int64 uint64; +# define CV_BIG_INT(n) n##I64 +# define CV_BIG_UINT(n) n##UI64 +#else + typedef int64_t int64; + typedef uint64_t uint64; +# define CV_BIG_INT(n) n##LL +# define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL + typedef unsigned char uchar; + typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} 
+Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. 
transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + CV_GpuApiCallError= -217, + CV_OpenGlNotSupported= -218, + CV_OpenGlApiCallError= -219 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#ifdef HAVE_TEGRA_OPTIMIZATION +# include "tegra_round.hpp" +#endif + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +# define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +# define MAX(a,b) ((a) < (b) ? 
(b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +# define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__ +# ifdef HAVE_TEGRA_OPTIMIZATION + TEGRA_ROUND(value); +# else + return (int)lrint(value); +# endif +#else + // while this is not IEEE754-compliant rounding, it's usually a good enough approximation + return (int)(value + (value >= 0 ? 
0.5 : -0.5)); +#endif +} + +#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP) +# include "emmintrin.h" +#endif + +CV_INLINE int cvFloor( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#elif defined __GNUC__ + int i = (int)value; + return i - (i > value); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(value - i); + return i - (diff.i < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#elif defined __GNUC__ + int i = (int)value; + return i + (i < value); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(i - value); + return i + (diff.i < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return _isnan(value); +#elif defined __GNUC__ + return isnan(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +#endif +} + + +CV_INLINE int cvIsInf( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return !_finite(value); +#elif defined __GNUC__ + return isinf(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +#endif +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + 
CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. + */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. 
*/ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. */ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL 
&& ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 
CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) +#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ 
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) 
((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. */ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI 
cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( 
double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct CvSize +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. 
length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int 
signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. */ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. 
*/ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. 
+ + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. +*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/* + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/*********************************** Chain/Countour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */ 
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/* flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/* type checking macros */ +#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( 
seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + 
(unsigned)((seq)->first->count) ? \ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + 
(reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 +#define CV_STORAGE_MEMORY 4 +#define CV_STORAGE_FORMAT_MASK (7<<3) +#define CV_STORAGE_FORMAT_AUTO 0 +#define CV_STORAGE_FORMAT_XML 8 +#define CV_STORAGE_FORMAT_YAML 16 + +/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. 
*/ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar 
or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +#endif /*__OPENCV_CORE_TYPES_H__*/ + +/* End of file. 
*/ diff --git a/OpenCV/Headers/core/version.hpp b/OpenCV/Headers/core/version.hpp new file mode 100644 index 0000000000..4e68c52a00 --- /dev/null +++ b/OpenCV/Headers/core/version.hpp @@ -0,0 +1,58 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_MAJOR_VERSION 2 +#define CV_MINOR_VERSION 4 +#define CV_SUBMINOR_VERSION 3 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CV_VERSION CVAUX_STR(CV_MAJOR_VERSION) "." CVAUX_STR(CV_MINOR_VERSION) "." CVAUX_STR(CV_SUBMINOR_VERSION) + +#endif diff --git a/OpenCV/Headers/core/wimage.hpp b/OpenCV/Headers/core/wimage.hpp new file mode 100644 index 0000000000..c7afa8c5de --- /dev/null +++ b/OpenCV/Headers/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. 
Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. 
+// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. +typedef WImage WImage_b; +typedef WImageView WImageView_b; +typedef WImageBuffer WImageBuffer_b; + +typedef WImageC WImage1_b; +typedef WImageViewC WImageView1_b; +typedef WImageBufferC WImageBuffer1_b; + +typedef WImageC WImage3_b; +typedef WImageViewC WImageView3_b; +typedef WImageBufferC WImageBuffer3_b; + +typedef WImage WImage_f; +typedef WImageView WImageView_f; +typedef WImageBuffer WImageBuffer_f; + +typedef WImageC WImage1_f; +typedef WImageViewC WImageView1_f; +typedef WImageBufferC WImageBuffer1_f; + +typedef WImageC WImage3_f; +typedef WImageViewC WImageView3_f; +typedef WImageBufferC WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases. 
+typedef WImage WImage_16s; +typedef WImageView WImageView_16s; +typedef WImageBuffer WImageBuffer_16s; + +typedef WImageC WImage1_16s; +typedef WImageViewC WImageView1_16s; +typedef WImageBufferC WImageBuffer1_16s; + +typedef WImageC WImage3_16s; +typedef WImageViewC WImageView3_16s; +typedef WImageBufferC WImageBuffer3_16s; + +typedef WImage WImage_16u; +typedef WImageView WImageView_16u; +typedef WImageBuffer WImageBuffer_16u; + +typedef WImageC WImage1_16u; +typedef WImageViewC WImageView1_16u; +typedef WImageBufferC WImageBuffer1_16u; + +typedef WImageC WImage3_16u; +typedef WImageViewC WImageView3_16u; +typedef WImageBufferC WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. 
+ int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator. 
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. 
+ void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. 
+ WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = 
reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/OpenCV/Headers/features2d/features2d.hpp b/OpenCV/Headers/features2d/features2d.hpp new file mode 100644 index 0000000000..a205322172 --- /dev/null +++ b/OpenCV/Headers/features2d/features2d.hpp @@ -0,0 +1,1606 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_FEATURES_2D_HPP__ +#define __OPENCV_FEATURES_2D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/miniflann.hpp" + +#ifdef __cplusplus +#include + +namespace cv +{ + +CV_EXPORTS bool initModule_features2d(); + +/*! + The Keypoint Class + + The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as + Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc. + + The keypoint is characterized by the 2D position, scale + (proportional to the diameter of the neighborhood that needs to be taken into account), + orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor + (usually represented as a feature vector). The keypoints representing the same object in different images can then be matched using + cv::KDTree or another method. +*/ +class CV_EXPORTS_W_SIMPLE KeyPoint +{ +public: + //! 
the default constructor + CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {} + //! the full constructor + KeyPoint(Point2f _pt, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(_pt), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + //! another form of the full constructor + CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(x, y), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + + size_t hash() const; + + //! converts vector of keypoints to vector of points + static void convert(const vector& keypoints, + CV_OUT vector& points2f, + const vector& keypointIndexes=vector()); + //! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation + static void convert(const vector& points2f, + CV_OUT vector& keypoints, + float size=1, float response=1, int octave=0, int class_id=-1); + + //! computes overlap for pair of keypoints; + //! overlap is a ratio between area of keypoint regions intersection and + //! area of keypoint regions union (now keypoint region is circle) + static float overlap(const KeyPoint& kp1, const KeyPoint& kp2); + + CV_PROP_RW Point2f pt; //!< coordinates of the keypoints + CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood + CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable); + //!< it's in [0,360) degrees and measured relative to + //!< image coordinate system, ie in clockwise. + CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. 
Can be used for the further sorting or subsampling + CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted + CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to) +}; + +//! writes vector of keypoints to the file storage +CV_EXPORTS void write(FileStorage& fs, const string& name, const vector& keypoints); +//! reads vector of keypoints from the specified file storage node +CV_EXPORTS void read(const FileNode& node, CV_OUT vector& keypoints); + +/* + * A class filters a vector of keypoints. + * Because now it is difficult to provide a convenient interface for all usage scenarios of the keypoints filter class, + * it has only several needed by now static methods. + */ +class CV_EXPORTS KeyPointsFilter +{ +public: + KeyPointsFilter(){} + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void runByImageBorder( vector& keypoints, Size imageSize, int borderSize ); + /* + * Remove keypoints of sizes out of range. + */ + static void runByKeypointSize( vector& keypoints, float minSize, + float maxSize=FLT_MAX ); + /* + * Remove keypoints from some image by mask for pixels of this image. + */ + static void runByPixelsMask( vector& keypoints, const Mat& mask ); + /* + * Remove duplicated keypoints. + */ + static void removeDuplicated( vector& keypoints ); + + /* + * Retain the specified number of the best keypoints (according to the response) + */ + static void retainBest( vector& keypoints, int npoints ); +}; + + +/************************************ Base Classes ************************************/ + +/* + * Abstract base class for 2D image feature detectors. + */ +class CV_EXPORTS_W FeatureDetector : public virtual Algorithm +{ +public: + virtual ~FeatureDetector(); + + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). 
Must be a char + * matrix with non-zero values in the region of interest. + */ + CV_WRAP void detect( const Mat& image, CV_OUT vector& keypoints, const Mat& mask=Mat() ) const; + + /* + * Detect keypoints in an image set. + * images Image collection. + * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i]. + * masks Masks for image set. masks[i] is a mask for images[i]. + */ + void detect( const vector& images, vector >& keypoints, const vector& masks=vector() ) const; + + // Return true if detector object is empty + CV_WRAP virtual bool empty() const; + + // Create feature detector by detector name. + CV_WRAP static Ptr create( const string& detectorType ); + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const = 0; + + /* + * Remove keypoints that are not in the mask. + * Helper function, useful when wrapping a library call for keypoint detection that + * does not support a mask argument. + */ + static void removeInvalidPoints( const Mat& mask, vector& keypoints ); +}; + + +/* + * Abstract base class for computing descriptors for image keypoints. + * + * In this interface we assume a keypoint descriptor can be represented as a + * dense, fixed-dimensional vector of some basic type. Most descriptors used + * in practice follow this pattern, as it makes it very easy to compute + * distances between descriptors. Therefore we represent a collection of + * descriptors as a Mat, where each row is one keypoint descriptor. + */ +class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm +{ +public: + virtual ~DescriptorExtractor(); + + /* + * Compute the descriptors for a set of keypoints in an image. + * image The image. + * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. + * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. 
+ */ + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT vector& keypoints, CV_OUT Mat& descriptors ) const; + + /* + * Compute the descriptors for a keypoints collection detected in image collection. + * images Image collection. + * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i]. + * Keypoints for which a descriptor cannot be computed are removed. + * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i]. + */ + void compute( const vector& images, vector >& keypoints, vector& descriptors ) const; + + CV_WRAP virtual int descriptorSize() const = 0; + CV_WRAP virtual int descriptorType() const = 0; + + CV_WRAP virtual bool empty() const; + + CV_WRAP static Ptr create( const string& descriptorExtractorType ); + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const = 0; + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void removeBorderKeypoints( vector& keypoints, + Size imageSize, int borderSize ); +}; + + + +/* + * Abstract base class for simultaneous 2D feature detection descriptor extraction. + */ +class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor +{ +public: + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + * useProvidedKeypoints If true, the method will skip the detection phase and will compute + * descriptors for the provided keypoints + */ + CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ) const = 0; + + // Create feature detector and descriptor extractor by name. + CV_WRAP static Ptr create( const string& name ); +}; + +/*! 
+ BRISK implementation +*/ +class CV_EXPORTS_W BRISK : public Feature2D +{ +public: + CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f); + + virtual ~BRISK(); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the BRISK features on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the BRISK features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + + // custom setup + CV_WRAP explicit BRISK(std::vector &radiusList, std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, std::vector indexChange=std::vector()); + + // call this to generate the kernel: + // circle of radius r (pixels), with n points; + // short pairings with dMax, long pairings with dMin + CV_WRAP void generateKernel(std::vector &radiusList, + std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + std::vector indexChange=std::vector()); + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + void computeKeypointsNoOrientation(InputArray image, InputArray mask, vector& keypoints) const; + void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool doDescriptors, bool doOrientation, + bool useProvidedKeypoints) const; + + // Feature parameters + CV_PROP_RW int threshold; + CV_PROP_RW int octaves; + + // some helper structures for the Brisk pattern representation + struct BriskPatternPoint{ + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + struct BriskShortPair{ + unsigned 
int i; // index of the first pattern point + unsigned int j; // index of other pattern point + }; + struct BriskLongPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + int weighted_dx; // 1024.0/dx + int weighted_dy; // 1024.0/dy + }; + inline int smoothedIntensity(const cv::Mat& image, + const cv::Mat& integral,const float key_x, + const float key_y, const unsigned int scale, + const unsigned int rot, const unsigned int point) const; + // pattern properties + BriskPatternPoint* patternPoints_; //[i][rotation][scale] + unsigned int points_; // total number of collocation points + float* scaleList_; // lists the scaling per scale index [scale] + unsigned int* sizeList_; // lists the total pattern size per scale index [scale] + static const unsigned int scales_; // scales discretization + static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... + static const unsigned int n_rot_; // discretization of the rotation look-up + + // pairs + int strings_; // number of uchars the descriptor consists of + float dMax_; // short pair maximum distance + float dMin_; // long pair maximum distance + BriskShortPair* shortPairs_; // d<_dMax + BriskLongPair* longPairs_; // d>_dMin + unsigned int noShortPairs_; // number of shortParis + unsigned int noLongPairs_; // number of longParis + + // general + static const float basicSize_; +}; + + +/*! + ORB implementation. 
+*/ +class CV_EXPORTS_W ORB : public Feature2D +{ +public: + // the size of the signature in bytes + enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; + + CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, + int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31 ); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the ORB features and descriptors on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the ORB features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW double scaleFactor; + CV_PROP_RW int nlevels; + CV_PROP_RW int edgeThreshold; + CV_PROP_RW int firstLevel; + CV_PROP_RW int WTA_K; + CV_PROP_RW int scoreType; + CV_PROP_RW int patchSize; +}; + +typedef ORB OrbFeatureDetector; +typedef ORB OrbDescriptorExtractor; + +/*! 
+ FREAK implementation +*/ +class CV_EXPORTS FREAK : public DescriptorExtractor +{ +public: + /** Constructor + * @param orientationNormalized enable orientation normalization + * @param scaleNormalized enable scale normalization + * @param patternScale scaling of the description pattern + * @param nbOctave number of octaves covered by the detected keypoints + * @param selectedPairs (optional) user defined selected pairs + */ + explicit FREAK( bool orientationNormalized = true, + bool scaleNormalized = true, + float patternScale = 22.0f, + int nOctaves = 4, + const vector& selectedPairs = vector()); + FREAK( const FREAK& rhs ); + FREAK& operator=( const FREAK& ); + + virtual ~FREAK(); + + /** returns the descriptor length in bytes */ + virtual int descriptorSize() const; + + /** returns the descriptor type */ + virtual int descriptorType() const; + + /** select the 512 "best description pairs" + * @param images grayscale images set + * @param keypoints set of detected keypoints + * @param corrThresh correlation threshold + * @param verbose print construction information + * @return list of best pair indexes + */ + vector selectPairs( const vector& images, vector >& keypoints, + const double corrThresh = 0.7, bool verbose = true ); + + AlgorithmInfo* info() const; + + enum + { + NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45 + }; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void buildPattern(); + uchar meanIntensity( const Mat& image, const Mat& integral, const float kp_x, const float kp_y, + const unsigned int scale, const unsigned int rot, const unsigned int point ) const; + + bool orientationNormalized; //true if the orientation is normalized, false otherwise + bool scaleNormalized; //true if the scale is normalized, false otherwise + double patternScale; //scaling of the pattern + int nOctaves; //number of octaves + bool extAll; // true if all pairs need to be extracted for pairs selection + + 
double patternScale0; + int nOctaves0; + vector selectedPairs0; + + struct PatternPoint + { + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + + struct DescriptionPair + { + uchar i; // index of the first point + uchar j; // index of the second point + }; + + struct OrientationPair + { + uchar i; // index of the first point + uchar j; // index of the second point + int weight_dx; // dx/(norm_sq))*4096 + int weight_dy; // dy/(norm_sq))*4096 + }; + + vector patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation) + int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries) + DescriptionPair descriptionPairs[NB_PAIRS]; + OrientationPair orientationPairs[NB_ORIENPAIRS]; +}; + + +/*! + Maximal Stable Extremal Regions class. + + The class implements MSER algorithm introduced by J. Matas. + Unlike SIFT, SURF and many other detectors in OpenCV, this is salient region detector, + not the salient point detector. + + It returns the regions, each of those is encoded as a contour. +*/ +class CV_EXPORTS_W MSER : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ); + + //! 
the operator that extracts the MSERs from the image or the specific part of it + CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector >& msers, + const Mat& mask=Mat() ) const; + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int delta; + int minArea; + int maxArea; + double maxVariation; + double minDiversity; + int maxEvolution; + double areaThreshold; + double minMargin; + int edgeBlurSize; +}; + +typedef MSER MserFeatureDetector; + +/*! + The "Star" Detector. + + The class implements the keypoint detector introduced by K. Konolige. +*/ +class CV_EXPORTS_W StarDetector : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30, + int _lineThresholdProjected=10, + int _lineThresholdBinarized=8, + int _suppressNonmaxSize=5); + + //! finds the keypoints in the image + CV_WRAP_AS(detect) void operator()(const Mat& image, + CV_OUT vector& keypoints) const; + + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +}; + +//! detects corners using FAST algorithm by E. 
Rosten +CV_EXPORTS void FAST( InputArray image, CV_OUT vector& keypoints, + int threshold, bool nonmaxSupression=true ); + +CV_EXPORTS void FASTX( InputArray image, CV_OUT vector& keypoints, + int threshold, bool nonmaxSupression, int type ); + +class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector +{ +public: + + enum + { // Define it in old class to simplify migration to 2.5 + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 + }; + + CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int threshold; + bool nonmaxSuppression; +}; + + +class CV_EXPORTS GFTTDetector : public FeatureDetector +{ +public: + GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int nfeatures; + double qualityLevel; + double minDistance; + int blockSize; + bool useHarrisDetector; + double k; +}; + +typedef GFTTDetector GoodFeaturesToTrackDetector; +typedef StarDetector StarFeatureDetector; + +class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector +{ +public: + struct CV_EXPORTS_W_SIMPLE Params + { + CV_WRAP Params(); + CV_PROP_RW float thresholdStep; + CV_PROP_RW float minThreshold; + CV_PROP_RW float maxThreshold; + CV_PROP_RW size_t minRepeatability; + CV_PROP_RW float minDistBetweenBlobs; + + CV_PROP_RW bool filterByColor; + CV_PROP_RW uchar blobColor; + + CV_PROP_RW bool filterByArea; + CV_PROP_RW float minArea, maxArea; + + CV_PROP_RW bool filterByCircularity; + CV_PROP_RW float minCircularity, maxCircularity; + + CV_PROP_RW bool filterByInertia; + CV_PROP_RW float minInertiaRatio, maxInertiaRatio; + + CV_PROP_RW bool filterByConvexity; + CV_PROP_RW float minConvexity, 
maxConvexity; + + void read( const FileNode& fn ); + void write( FileStorage& fs ) const; + }; + + CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); + + virtual void read( const FileNode& fn ); + virtual void write( FileStorage& fs ) const; + +protected: + struct CV_EXPORTS Center + { + Point2d location; + double radius; + double confidence; + }; + + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + virtual void findBlobs(const Mat &image, const Mat &binaryImage, vector
¢ers) const; + + Params params; +}; + + +class CV_EXPORTS DenseFeatureDetector : public FeatureDetector +{ +public: + explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1, + float featureScaleMul=0.1f, + int initXyStep=6, int initImgBound=0, + bool varyXyStepWithScale=true, + bool varyImgBoundWithScale=false ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double initFeatureScale; + int featureScaleLevels; + double featureScaleMul; + + int initXyStep; + int initImgBound; + + bool varyXyStepWithScale; + bool varyImgBoundWithScale; +}; + +/* + * Adapts a detector to partition the source image into a grid and detect + * points in each cell. + */ +class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector +{ +public: + /* + * detector Detector that will be adapted. + * maxTotalKeypoints Maximum count of keypoints detected on the image. Only the strongest keypoints + * will be keeped. + * gridRows Grid rows count. + * gridCols Grid column count. + */ + CV_WRAP GridAdaptedFeatureDetector( const Ptr& detector=0, + int maxTotalKeypoints=1000, + int gridRows=4, int gridCols=4 ); + + // TODO implement read/write + virtual bool empty() const; + + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + Ptr detector; + int maxTotalKeypoints; + int gridRows; + int gridCols; +}; + +/* + * Adapts a detector to detect points over multiple levels of a Gaussian + * pyramid. Useful for detectors that are not inherently scaled. 
+ */ +class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector +{ +public: + // maxLevel - The 0-based index of the last pyramid layer + CV_WRAP PyramidAdaptedFeatureDetector( const Ptr& detector, int maxLevel=2 ); + + // TODO implement read/write + virtual bool empty() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + Ptr detector; + int maxLevel; +}; + +/** \brief A feature detector parameter adjuster, this is used by the DynamicAdaptedFeatureDetector + * and is a wrapper for FeatureDetector that allow them to be adjusted after a detection + */ +class CV_EXPORTS AdjusterAdapter: public FeatureDetector +{ +public: + /** pure virtual interface + */ + virtual ~AdjusterAdapter() {} + /** too few features were detected so, adjust the detector params accordingly + * \param min the minimum number of desired features + * \param n_detected the number previously detected + */ + virtual void tooFew(int min, int n_detected) = 0; + /** too many features were detected so, adjust the detector params accordingly + * \param max the maximum number of desired features + * \param n_detected the number previously detected + */ + virtual void tooMany(int max, int n_detected) = 0; + /** are params maxed out or still valid? + * \return false if the parameters can't be adjusted any more + */ + virtual bool good() const = 0; + + virtual Ptr clone() const = 0; + + static Ptr create( const string& detectorType ); +}; +/** \brief an adaptively adjusting detector that iteratively detects until the desired number + * of features are detected. + * Beware that this is not thread safe - as the adjustment of parameters breaks the const + * of the detection routine... 
+ * /TODO Make this const correct and thread safe + * + * sample usage: + //will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run + //FAST feature detection 10 times until that number of keypoints are found + Ptr detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10)); + + */ +class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector +{ +public: + + /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment + * \param max_features the maximum desired number of features + * \param max_iters the maximum number of times to try to adjust the feature detector params + * for the FastAdjuster this can be high, but with Star or Surf this can get time consuming + * \param min_features the minimum desired features + */ + DynamicAdaptedFeatureDetector( const Ptr& adjuster, int min_features=400, int max_features=500, int max_iters=5 ); + + virtual bool empty() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + +private: + DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&); + DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&); + + int escape_iters_; + int min_features_, max_features_; + const Ptr adjuster_; +}; + +/**\brief an adjust for the FAST detector. 
This will basically decrement or increment the + * threshold by 1 + */ +class CV_EXPORTS FastAdjuster: public AdjusterAdapter +{ +public: + /**\param init_thresh the initial threshold to start with, default = 20 + * \param nonmax whether to use non max or not for fast feature detection + */ + FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int thresh_; + bool nonmax_; + int init_thresh_, min_thresh_, max_thresh_; +}; + + +/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now + * TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams + */ +class CV_EXPORTS StarAdjuster: public AdjusterAdapter +{ +public: + StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +class CV_EXPORTS SurfAdjuster: public AdjusterAdapter +{ +public: + SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 ); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +CV_EXPORTS Mat windowedMatchingMask( const vector& keypoints1, const vector& 
keypoints2, + float maxDeltaX, float maxDeltaY ); + + + +/* + * OpponentColorDescriptorExtractor + * + * Adapts a descriptor extractor to compute descripors in Opponent Color Space + * (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition"). + * Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor + * (set in constructor) computes descriptors on each of the three channel and concatenate + * them into a single color descriptor. + */ +class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor +{ +public: + OpponentColorDescriptorExtractor( const Ptr& descriptorExtractor ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + Ptr descriptorExtractor; +}; + +/* + * BRIEF Descriptor + */ +class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor +{ +public: + static const int PATCH_SIZE = 48; + static const int KERNEL_SIZE = 9; + + // bytes is a length of descriptor in bytes. It can be equal 16, 32 or 64 bytes. 
+ BriefDescriptorExtractor( int bytes = 32 ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + /// @todo read and write for brief + + AlgorithmInfo* info() const; + +protected: + virtual void computeImpl(const Mat& image, vector& keypoints, Mat& descriptors) const; + + typedef void(*PixelTestFn)(const Mat&, const vector&, Mat&); + + int bytes_; + PixelTestFn test_fn_; +}; + + +/****************************************************************************************\ +* Distance * +\****************************************************************************************/ + +template +struct CV_EXPORTS Accumulator +{ + typedef T Type; +}; + +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; + +/* + * Squared Euclidean distance functor + */ +template +struct CV_EXPORTS SL2 +{ + enum { normType = NORM_L2SQR }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL2Sqr(a, b, size); + } +}; + +/* + * Euclidean distance functor + */ +template +struct CV_EXPORTS L2 +{ + enum { normType = NORM_L2 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return (ResultType)sqrt((double)normL2Sqr(a, b, size)); + } +}; + +/* + * Manhattan distance (city block distance) functor + */ +template +struct CV_EXPORTS L1 +{ + enum { normType = NORM_L1 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL1(a, b, size); + } +}; + +/* + * Hamming distance functor - counts the bit differences between two 
strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct CV_EXPORTS Hamming +{ + enum { normType = NORM_HAMMING }; + typedef unsigned char ValueType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size); + } +}; + +typedef Hamming HammingLUT; + +template struct CV_EXPORTS HammingMultilevel +{ + enum { normType = NORM_HAMMING + (cellsize>1) }; + typedef unsigned char ValueType; + typedef int ResultType; + + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size, cellsize); + } +}; + +/****************************************************************************************\ +* DMatch * +\****************************************************************************************/ +/* + * Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors. 
+ */ +struct CV_EXPORTS_W_SIMPLE DMatch +{ + CV_WRAP DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {} + + CV_PROP_RW int queryIdx; // query descriptor index + CV_PROP_RW int trainIdx; // train descriptor index + CV_PROP_RW int imgIdx; // train image index + + CV_PROP_RW float distance; + + // less is better + bool operator<( const DMatch &m ) const + { + return distance < m.distance; + } +}; + +/****************************************************************************************\ +* DescriptorMatcher * +\****************************************************************************************/ +/* + * Abstract base class for matching two sets of descriptors. + */ +class CV_EXPORTS_W DescriptorMatcher : public Algorithm +{ +public: + virtual ~DescriptorMatcher(); + + /* + * Add descriptors to train descriptor collection. + * descriptors Descriptors to add. Each descriptors[i] is a descriptors set from one image. + */ + CV_WRAP virtual void add( const vector& descriptors ); + /* + * Get train descriptors collection. + */ + CV_WRAP const vector& getTrainDescriptors() const; + /* + * Clear train descriptors collection. + */ + CV_WRAP virtual void clear(); + + /* + * Return true if there are not train descriptors in collection. + */ + CV_WRAP virtual bool empty() const; + /* + * Return true if the matcher supports mask in match methods. + */ + CV_WRAP virtual bool isMaskSupported() const = 0; + + /* + * Train matcher (e.g. train flann index). + * In all methods to match the method train() is run every time before matching. + * Some descriptor matchers (e.g. 
BruteForceMatcher) have empty implementation + * of this method, other matchers really train their inner structures + * (e.g. FlannBasedMatcher trains flann::Index). So nonempty implementation + * of train() should check the class object state and do traing/retraining + * only if the state requires that (e.g. FlannBasedMatcher trains flann::Index + * if it has not trained yet or if new descriptors have been added to the train + * collection). + */ + CV_WRAP virtual void train(); + /* + * Group of methods to match descriptors from image pair. + * Method train() is run in this methods. + */ + // Find one best match for each query descriptor (if mask is empty). + CV_WRAP void match( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query descriptor (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + CV_WRAP void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than + // maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match descriptors from one image to image set. + * See description of similar methods for matching image pair above. 
+ */ + CV_WRAP void match( const Mat& queryDescriptors, CV_OUT vector& matches, + const vector& masks=vector() ); + CV_WRAP void knnMatch( const Mat& queryDescriptors, CV_OUT vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. + virtual Ptr clone( bool emptyTrainData=false ) const = 0; + + CV_WRAP static Ptr create( const string& descriptorMatcherType ); +protected: + /* + * Class to work with descriptors from several images as with one merged matrix. + * It is used e.g. in FlannBasedMatcher. + */ + class CV_EXPORTS DescriptorCollection + { + public: + DescriptorCollection(); + DescriptorCollection( const DescriptorCollection& collection ); + virtual ~DescriptorCollection(); + + // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here. + void set( const vector& descriptors ); + virtual void clear(); + + const Mat& getDescriptors() const; + const Mat getDescriptor( int imgIdx, int localDescIdx ) const; + const Mat getDescriptor( int globalDescIdx ) const; + void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const; + + int size() const; + + protected: + Mat mergedDescriptors; + vector startIdxs; + }; + + // In fact the matching is implemented only by the following two methods. These methods suppose + // that the class object has been trained already. 
Public match methods call these methods + // after calling train(). + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ) = 0; + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ) = 0; + + static bool isPossibleMatch( const Mat& mask, int queryIdx, int trainIdx ); + static bool isMaskedOut( const vector& masks, int queryIdx ); + + static Mat clone_op( Mat m ) { return m.clone(); } + void checkMasks( const vector& masks, int queryDescriptorsCount ) const; + + // Collection of descriptors from train images. + vector trainDescCollection; +}; + +/* + * Brute-force descriptor matcher. + * + * For each descriptor in the first set, this matcher finds the closest + * descriptor in the second set by trying each one. + * + * For efficiency, BruteForceMatcher is templated on the distance metric. + * For float descriptors, a common choice would be cv::L2. 
+ */ +class CV_EXPORTS_W BFMatcher : public DescriptorMatcher +{ +public: + CV_WRAP BFMatcher( int normType, bool crossCheck=false ); + virtual ~BFMatcher() {} + + virtual bool isMaskSupported() const { return true; } + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + int normType; + bool crossCheck; +}; + + +/* + * Flann based matcher + */ +class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher +{ +public: + CV_WRAP FlannBasedMatcher( const Ptr& indexParams=new flann::KDTreeIndexParams(), + const Ptr& searchParams=new flann::SearchParams() ); + + virtual void add( const vector& descriptors ); + virtual void clear(); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + virtual void train(); + virtual bool isMaskSupported() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + static void convertToDMatches( const DescriptorCollection& descriptors, + const Mat& indices, const Mat& distances, + vector >& matches ); + + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + Ptr indexParams; + Ptr searchParams; + Ptr flannIndex; + + DescriptorCollection mergedDescriptors; + int addedDescCount; +}; + +/****************************************************************************************\ +* GenericDescriptorMatcher * 
+\****************************************************************************************/ +/* + * Abstract interface for a keypoint descriptor and matcher + */ +class GenericDescriptorMatcher; +typedef GenericDescriptorMatcher GenericDescriptorMatch; + +class CV_EXPORTS GenericDescriptorMatcher +{ +public: + GenericDescriptorMatcher(); + virtual ~GenericDescriptorMatcher(); + + /* + * Add train collection: images and keypoints from them. + * images A set of train images. + * ketpoints Keypoint collection that have been detected on train images. + * + * Keypoints for which a descriptor cannot be computed are removed. Such keypoints + * must be filtered in this method befor adding keypoints to train collection "trainPointCollection". + * If inheritor class need perform such prefiltering the method add() must be overloaded. + * In the other class methods programmer has access to the train keypoints by a constant link. + */ + virtual void add( const vector& images, + vector >& keypoints ); + + const vector& getTrainImages() const; + const vector >& getTrainKeypoints() const; + + /* + * Clear images and keypoints storing in train collection. + */ + virtual void clear(); + /* + * Returns true if matcher supports mask to match descriptors. + */ + virtual bool isMaskSupported() = 0; + /* + * Train some inner structures (e.g. flann index or decision trees). + * train() methods is run every time in matching methods. So the method implementation + * should has a check whether these inner structures need be trained/retrained or not. + */ + virtual void train(); + + /* + * Classifies query keypoints. + * queryImage The query image + * queryKeypoints Keypoints from the query image + * trainImage The train image + * trainKeypoints Keypoints from the train image + */ + // Classify keypoints from query image under one train image. 
+ void classify( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints ) const; + // Classify keypoints from query image under train image collection. + void classify( const Mat& queryImage, vector& queryKeypoints ); + + /* + * Group of methods to match keypoints from image pair. + * Keypoints for which a descriptor cannot be computed are removed. + * train() method is called here. + */ + // Find one best match for each query descriptor (if mask is empty). + void match( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query keypoint (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. + // If compactResult is true matches vector will not contain matches for fully masked out query descriptors. + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match keypoints from one image to image set. + * See description of similar methods for matching image pair above. 
+ */ + void match( const Mat& queryImage, vector& queryKeypoints, + vector& matches, const vector& masks=vector() ); + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& fn ); + // Writes matcher object to a file storage + virtual void write( FileStorage& fs ) const; + + // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty) + virtual bool empty() const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. + virtual Ptr clone( bool emptyTrainData=false ) const = 0; + + static Ptr create( const string& genericDescritptorMatcherType, + const string ¶msFilename=string() ); + +protected: + // In fact the matching is implemented only by the following two methods. These methods suppose + // that the class object has been trained already. Public match methods call these methods + // after calling train(). 
+ virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ) = 0; + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ) = 0; + /* + * A storage for sets of keypoints together with corresponding images and class IDs + */ + class CV_EXPORTS KeyPointCollection + { + public: + KeyPointCollection(); + KeyPointCollection( const KeyPointCollection& collection ); + void add( const vector& images, const vector >& keypoints ); + void clear(); + + // Returns the total number of keypoints in the collection + size_t keypointCount() const; + size_t imageCount() const; + + const vector >& getKeypoints() const; + const vector& getKeypoints( int imgIdx ) const; + const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const; + const KeyPoint& getKeyPoint( int globalPointIdx ) const; + void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const; + + const vector& getImages() const; + const Mat& getImage( int imgIdx ) const; + + protected: + int pointCount; + + vector images; + vector > keypoints; + // global indices of the first points in each image, startIndices.size() = keypoints.size() + vector startIndices; + + private: + static Mat clone_op( Mat m ) { return m.clone(); } + }; + + KeyPointCollection trainPointCollection; +}; + + +/****************************************************************************************\ +* VectorDescriptorMatcher * +\****************************************************************************************/ + +/* + * A class used for matching descriptors that can be described as vectors in a finite-dimensional space + */ +class VectorDescriptorMatcher; +typedef VectorDescriptorMatcher VectorDescriptorMatch; + +class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + VectorDescriptorMatcher( const Ptr& 
extractor, const Ptr& matcher ); + virtual ~VectorDescriptorMatcher(); + + virtual void add( const vector& imgCollection, + vector >& pointCollection ); + + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode& fn ); + virtual void write( FileStorage& fs ) const; + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + Ptr extractor; + Ptr matcher; +}; + +/****************************************************************************************\ +* Drawing functions * +\****************************************************************************************/ +struct CV_EXPORTS DrawMatchesFlags +{ + enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create), + // i.e. existing memory of output image may be reused. + // Two source image, matches and single keypoints will be drawn. + // For each keypoint only the center point will be drawn (without + // the circle around keypoint with keypoint size and orientation). + DRAW_OVER_OUTIMG = 1, // Output image matrix will not be created (Mat::create). + // Matches will be drawn on existing content of output image. + NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn. + DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and + // orientation will be drawn. + }; +}; + +// Draw keypoints. +CV_EXPORTS_W void drawKeypoints( const Mat& image, const vector& keypoints, CV_OUT Mat& outImage, + const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT ); + +// Draws matches of keypints from two images on output image. 
+CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector& matchesMask=vector(), int flags=DrawMatchesFlags::DEFAULT ); + +CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector >& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector >& matchesMask=vector >(), int flags=DrawMatchesFlags::DEFAULT ); + +/****************************************************************************************\ +* Functions to evaluate the feature detectors and [generic] descriptor extractors * +\****************************************************************************************/ + +CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector* keypoints1, vector* keypoints2, + float& repeatability, int& correspCount, + const Ptr& fdetector=Ptr() ); + +CV_EXPORTS void computeRecallPrecisionCurve( const vector >& matches1to2, + const vector >& correctMatches1to2Mask, + vector& recallPrecisionCurve ); + +CV_EXPORTS float getRecall( const vector& recallPrecisionCurve, float l_precision ); +CV_EXPORTS int getNearestPoint( const vector& recallPrecisionCurve, float l_precision ); + +CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector& keypoints1, vector& keypoints2, + vector >* matches1to2, vector >* correctMatches1to2Mask, + vector& recallPrecisionCurve, + const Ptr& dmatch=Ptr() ); + + +/****************************************************************************************\ +* Bag of visual words * +\****************************************************************************************/ +/* + * Abstract base class for 
training of a 'bag of visual words' vocabulary from a set of descriptors + */ +class CV_EXPORTS BOWTrainer +{ +public: + BOWTrainer(); + virtual ~BOWTrainer(); + + void add( const Mat& descriptors ); + const vector& getDescriptors() const; + int descripotorsCount() const; + + virtual void clear(); + + /* + * Train visual words vocabulary, that is cluster training descriptors and + * compute cluster centers. + * Returns cluster centers. + * + * descriptors Training descriptors computed on images keypoints. + */ + virtual Mat cluster() const = 0; + virtual Mat cluster( const Mat& descriptors ) const = 0; + +protected: + vector descriptors; + int size; +}; + +/* + * This is BOWTrainer using cv::kmeans to get vocabulary. + */ +class CV_EXPORTS BOWKMeansTrainer : public BOWTrainer +{ +public: + BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(), + int attempts=3, int flags=KMEANS_PP_CENTERS ); + virtual ~BOWKMeansTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). + virtual Mat cluster() const; + virtual Mat cluster( const Mat& descriptors ) const; + +protected: + + int clusterCount; + TermCriteria termcrit; + int attempts; + int flags; +}; + +/* + * Class to compute image descriptor using bag of visual words. + */ +class CV_EXPORTS BOWImgDescriptorExtractor +{ +public: + BOWImgDescriptorExtractor( const Ptr& dextractor, + const Ptr& dmatcher ); + virtual ~BOWImgDescriptorExtractor(); + + void setVocabulary( const Mat& vocabulary ); + const Mat& getVocabulary() const; + void compute( const Mat& image, vector& keypoints, Mat& imgDescriptor, + vector >* pointIdxsOfClusters=0, Mat* descriptors=0 ); + // compute() is not constant because DescriptorMatcher::match is not constant + + int descriptorSize() const; + int descriptorType() const; + +protected: + Mat vocabulary; + Ptr dextractor; + Ptr dmatcher; +}; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. 
*/ diff --git a/OpenCV/Headers/flann/all_indices.h b/OpenCV/Headers/flann/all_indices.h new file mode 100644 index 0000000000..ff53fd84c2 --- /dev/null +++ b/OpenCV/Headers/flann/all_indices.h @@ -0,0 +1,155 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_ALL_INDICES_H_ +#define OPENCV_FLANN_ALL_INDICES_H_ + +#include "general.h" + +#include "nn_index.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "hierarchical_clustering_index.h" +#include "lsh_index.h" +#include "autotuned_index.h" + + +namespace cvflann +{ + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE_SINGLE: + nnIndex = new KDTreeSingleIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE: + nnIndex = new KDTreeIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_COMPOSITE: + nnIndex = new CompositeIndex(dataset, params, distance); + break; + case FLANN_INDEX_AUTOTUNED: + nnIndex = new AutotunedIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new 
KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance) +{ + return index_creator::create(dataset, params,distance); +} + +} + +#endif /* OPENCV_FLANN_ALL_INDICES_H_ */ diff --git a/OpenCV/Headers/flann/allocator.h b/OpenCV/Headers/flann/allocator.h new file mode 100644 index 0000000000..26091d0c74 --- /dev/null +++ b/OpenCV/Headers/flann/allocator.h @@ -0,0 +1,188 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_ALLOCATOR_H_ +#define OPENCV_FLANN_ALLOCATOR_H_ + +#include +#include + + +namespace cvflann +{ + +/** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ +template +T* allocate(size_t count = 1) +{ + T* mem = (T*) ::malloc(sizeof(T)*count); + return mem; +} + + +/** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). 
First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + +const size_t WORDSIZE=16; +const size_t BLOCKSIZE=8192; + +class PooledAllocator +{ + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */ + + + int remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + int blocksize; + + +public: + int usedMemory; + int wastedMemory; + + /** + Default constructor. Initializes a new pool. + */ + PooledAllocator(int blockSize = BLOCKSIZE) + { + blocksize = blockSize; + remaining = 0; + base = NULL; + + usedMemory = 0; + wastedMemory = 0; + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() + { + void* prev; + + while (base != NULL) { + prev = *((void**) base); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* allocateMemory(int size) + { + int blockSize; + + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. 
Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blockSize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. */ + ((void**) m)[0] = base; + base = m; + + int shift = 0; + //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blockSize - sizeof(void*) - shift; + loc = ((char*)m + sizeof(void*) + shift); + } + void* rloc = loc; + loc = (char*)loc + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(size_t count = 1) + { + T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count)); + return mem; + } + +}; + +} + +#endif //OPENCV_FLANN_ALLOCATOR_H_ diff --git a/OpenCV/Headers/flann/any.h b/OpenCV/Headers/flann/any.h new file mode 100644 index 0000000000..89189c64ef --- /dev/null +++ b/OpenCV/Headers/flann/any.h @@ -0,0 +1,305 @@ +#ifndef OPENCV_FLANN_ANY_H_ +#define OPENCV_FLANN_ANY_H_ +/* + * (C) Copyright Christopher Diggins 2005-2011 + * (C) Copyright Pablo Aguilar 2005 + * (C) Copyright Kevlin Henney 2001 + * + * Distributed under the Boost Software License, Version 1.0. 
(See + * accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt + * + * Adapted for FLANN by Marius Muja + */ + +#include "defines.h" +#include +#include +#include + +namespace cvflann +{ + +namespace anyimpl +{ + +struct bad_any_cast +{ +}; + +struct empty_any +{ +}; + +inline std::ostream& operator <<(std::ostream& out, const empty_any&) +{ + out << "[empty_any]"; + return out; +} + +struct base_any_policy +{ + virtual void static_delete(void** x) = 0; + virtual void copy_from_value(void const* src, void** dest) = 0; + virtual void clone(void* const* src, void** dest) = 0; + virtual void move(void* const* src, void** dest) = 0; + virtual void* get_value(void** src) = 0; + virtual ::size_t get_size() = 0; + virtual const std::type_info& type() = 0; + virtual void print(std::ostream& out, void* const* src) = 0; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~base_any_policy() {} +#endif +}; + +template +struct typed_base_any_policy : base_any_policy +{ + virtual ::size_t get_size() { return sizeof(T); } + virtual const std::type_info& type() { return typeid(T); } + +}; + +template +struct small_any_policy : typed_base_any_policy +{ + virtual void static_delete(void**) { } + virtual void copy_from_value(void const* src, void** dest) + { + new (dest) T(* reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) { *dest = *src; } + virtual void move(void* const* src, void** dest) { *dest = *src; } + virtual void* get_value(void** src) { return reinterpret_cast(src); } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(src); } +}; + +template +struct big_any_policy : typed_base_any_policy +{ + virtual void static_delete(void** x) + { + if (* x) delete (* reinterpret_cast(x)); *x = NULL; + } + virtual void copy_from_value(void const* src, void** dest) + { + *dest = new T(*reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) + { + *dest = new 
T(**reinterpret_cast(src)); + } + virtual void move(void* const* src, void** dest) + { + (*reinterpret_cast(dest))->~T(); + **reinterpret_cast(dest) = **reinterpret_cast(src); + } + virtual void* get_value(void** src) { return *src; } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(*src); } +}; + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template +struct choose_policy +{ + typedef big_any_policy type; +}; + +template +struct choose_policy +{ + typedef small_any_policy type; +}; + +struct any; + +/// Choosing the policy for an any type is illegal, but should never happen. +/// This is designed to throw a compiler error. +template<> +struct choose_policy +{ + typedef void type; +}; + +/// Specializations for small types. +#define SMALL_POLICY(TYPE) \ + template<> \ + struct choose_policy { typedef small_any_policy type; \ + } + +SMALL_POLICY(signed char); +SMALL_POLICY(unsigned char); +SMALL_POLICY(signed short); +SMALL_POLICY(unsigned short); +SMALL_POLICY(signed int); +SMALL_POLICY(unsigned int); +SMALL_POLICY(signed long); +SMALL_POLICY(unsigned long); +SMALL_POLICY(float); +SMALL_POLICY(bool); + +#undef SMALL_POLICY + +/// This function will return a different policy for each type. +template +base_any_policy* get_policy() +{ + static typename choose_policy::type policy; + return &policy; +} +} // namespace anyimpl + +struct any +{ +private: + // fields + anyimpl::base_any_policy* policy; + void* object; + +public: + /// Initializing constructor. + template + any(const T& x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Empty constructor. + any() + : policy(anyimpl::get_policy()), object(NULL) + { } + + /// Special initializing constructor for string literals. 
+ any(const char* x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Copy constructor. + any(const any& x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Destructor. + ~any() + { + policy->static_delete(&object); + } + + /// Assignment function from another any. + any& assign(const any& x) + { + reset(); + policy = x.policy; + policy->clone(&x.object, &object); + return *this; + } + + /// Assignment function. + template + any& assign(const T& x) + { + reset(); + policy = anyimpl::get_policy(); + policy->copy_from_value(&x, &object); + return *this; + } + + /// Assignment operator. + template + any& operator=(const T& x) + { + return assign(x); + } + + /// Assignment operator, specialed for literal strings. + /// They have types like const char [6] which don't work as expected. + any& operator=(const char* x) + { + return assign(x); + } + + /// Utility functions + any& swap(any& x) + { + std::swap(policy, x.policy); + std::swap(object, x.object); + return *this; + } + + /// Cast operator. You can only cast to the original type. + template + T& cast() + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(&object)); + return *r; + } + + /// Cast operator. You can only cast to the original type. + template + const T& cast() const + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + void* obj = const_cast(object); + T* r = reinterpret_cast(policy->get_value(&obj)); + return *r; + } + + /// Returns true if the any contains no value. + bool empty() const + { + return policy->type() == typeid(anyimpl::empty_any); + } + + /// Frees any allocated memory, and sets the value to NULL. + void reset() + { + policy->static_delete(&object); + policy = anyimpl::get_policy(); + } + + /// Returns true if the two types are the same. 
+ bool compatible(const any& x) const + { + return policy->type() == x.policy->type(); + } + + /// Returns if the type is compatible with the policy + template + bool has_type() + { + return policy->type() == typeid(T); + } + + const std::type_info& type() const + { + return policy->type(); + } + + friend std::ostream& operator <<(std::ostream& out, const any& any_val); +}; + +inline std::ostream& operator <<(std::ostream& out, const any& any_val) +{ + any_val.policy->print(out,&any_val.object); + return out; +} + +} + +#endif // OPENCV_FLANN_ANY_H_ diff --git a/OpenCV/Headers/flann/autotuned_index.h b/OpenCV/Headers/flann/autotuned_index.h new file mode 100644 index 0000000000..8d531753e0 --- /dev/null +++ b/OpenCV/Headers/flann/autotuned_index.h @@ -0,0 +1,583 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ +#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_ +#define OPENCV_FLANN_AUTOTUNED_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "ground_truth.h" +#include "index_testing.h" +#include "sampling.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "logger.h" + +namespace cvflann +{ + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance); + + +struct AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1) + { + (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED; + // precision desired (used for autotuning, -1 otherwise) + (*this)["target_precision"] = target_precision; + // build tree time weighting factor + (*this)["build_weight"] = build_weight; + // index memory weighting factor + (*this)["memory_weight"] = memory_weight; + // what fraction of the dataset to use for autotuning + (*this)["sample_fraction"] = sample_fraction; + } +}; + + +template +class AutotunedIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + AutotunedIndex(const Matrix& inputData, const IndexParams& params = 
AutotunedIndexParams(), Distance d = Distance()) : + dataset_(inputData), distance_(d) + { + target_precision_ = get_param(params, "target_precision",0.8f); + build_weight_ = get_param(params,"build_weight", 0.01f); + memory_weight_ = get_param(params, "memory_weight", 0.0f); + sample_fraction_ = get_param(params,"sample_fraction", 0.1f); + bestIndex_ = NULL; + } + + AutotunedIndex(const AutotunedIndex&); + AutotunedIndex& operator=(const AutotunedIndex&); + + virtual ~AutotunedIndex() + { + if (bestIndex_ != NULL) { + delete bestIndex_; + bestIndex_ = NULL; + } + } + + /** + * Method responsible with building the index. + */ + virtual void buildIndex() + { + bestParams_ = estimateBuildParams(); + Logger::info("----------------------------------------------------\n"); + Logger::info("Autotuned parameters:\n"); + print_params(bestParams_); + Logger::info("----------------------------------------------------\n"); + + bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_); + bestIndex_->buildIndex(); + speedup_ = estimateSearchParams(bestSearchParams_); + Logger::info("----------------------------------------------------\n"); + Logger::info("Search parameters:\n"); + print_params(bestSearchParams_); + Logger::info("----------------------------------------------------\n"); + } + + /** + * Saves the index to a stream + */ + virtual void saveIndex(FILE* stream) + { + save_value(stream, (int)bestIndex_->getType()); + bestIndex_->saveIndex(stream); + save_value(stream, get_param(bestSearchParams_, "checks")); + } + + /** + * Loads the index from a stream + */ + virtual void loadIndex(FILE* stream) + { + int index_type; + + load_value(stream, index_type); + IndexParams params; + params["algorithm"] = (flann_algorithm_t)index_type; + bestIndex_ = create_index_by_type(dataset_, params, distance_); + bestIndex_->loadIndex(stream); + int checks; + load_value(stream, checks); + bestSearchParams_["checks"] = checks; + } + + /** + * Method that searches for 
nearest-neighbors + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + int checks = get_param(searchParams,"checks",FLANN_CHECKS_AUTOTUNED); + if (checks == FLANN_CHECKS_AUTOTUNED) { + bestIndex_->findNeighbors(result, vec, bestSearchParams_); + } + else { + bestIndex_->findNeighbors(result, vec, searchParams); + } + } + + + IndexParams getParameters() const + { + return bestIndex_->getParameters(); + } + + SearchParams getSearchParameters() const + { + return bestSearchParams_; + } + + float getSpeedup() const + { + return speedup_; + } + + + /** + * Number of features in this index. + */ + virtual size_t size() const + { + return bestIndex_->size(); + } + + /** + * The length of each vector in this index. + */ + virtual size_t veclen() const + { + return bestIndex_->veclen(); + } + + /** + * The amount of memory (in bytes) this index uses. + */ + virtual int usedMemory() const + { + return bestIndex_->usedMemory(); + } + + /** + * Algorithm name + */ + virtual flann_algorithm_t getType() const + { + return FLANN_INDEX_AUTOTUNED; + } + +private: + + struct CostData + { + float searchTimeCost; + float buildTimeCost; + float memoryCost; + float totalCost; + IndexParams params; + }; + + void evaluate_kmeans(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n", + get_param(cost.params,"iterations"), + get_param(cost.params,"branching")); + KMeansIndex kmeans(sampledDataset_, cost.params, distance_); + // measure index build time + t.start(); + kmeans.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + // measure search time + float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kmeans.usedMemory() + 
datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_); + } + + + void evaluate_kdtree(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KDTree using params: trees=%d\n", get_param(cost.params,"trees")); + KDTreeIndex kdtree(sampledDataset_, cost.params, distance_); + + t.start(); + kdtree.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + //measure search time + float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime); + } + + + // struct KMeansSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<2) return maxFloat; + // if (params[1]<0) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KMEANS; + // c.params["centers-init"] = CENTERS_RANDOM; + // c.params["branching"] = params[0]; + // c.params["max-iterations"] = params[1]; + // + // autotuner.evaluate_kmeans(c); + // + // return c.timeCost; + // + // } + // }; + // + // struct KDTreeSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<1) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = 
KDTREE; + // c.params["trees"] = params[0]; + // + // autotuner.evaluate_kdtree(c); + // + // return c.timeCost; + // + // } + // }; + + + + void optimizeKMeans(std::vector& costs) + { + Logger::info("KMEANS, Step 1: Exploring parameter space\n"); + + // explore kmeans parameters space using combinations of the parameters below + int maxIterations[] = { 1, 5, 10, 15 }; + int branchingFactors[] = { 16, 32, 64, 128, 256 }; + + int kmeansParamSpaceSize = FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors); + costs.reserve(costs.size() + kmeansParamSpaceSize); + + // evaluate kmeans for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) { + for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KMEANS; + cost.params["centers_init"] = FLANN_CENTERS_RANDOM; + cost.params["iterations"] = maxIterations[i]; + cost.params["branching"] = branchingFactors[j]; + + evaluate_kmeans(cost); + costs.push_back(cost); + } + } + + // Logger::info("KMEANS, Step 2: simplex-downhill optimization\n"); + // + // const int n = 2; + // // choose initial simplex points as the best parameters so far + // int kmeansNMPoints[n*(n+1)]; + // float kmeansVals[n+1]; + // for (int i=0;i& costs) + { + Logger::info("KD-TREE, Step 1: Exploring parameter space\n"); + + // explore kd-tree parameters space using the parameters below + int testTrees[] = { 1, 4, 8, 16, 32 }; + + // evaluate kdtree for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) { + CostData cost; + cost.params["trees"] = testTrees[i]; + + evaluate_kdtree(cost); + costs.push_back(cost); + } + + // Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n"); + // + // const int n = 1; + // // choose initial simplex points as the best parameters so far + // int kdtreeNMPoints[n*(n+1)]; + // float kdtreeVals[n+1]; + // for (int i=0;i costs; + + int sampleSize = int(sample_fraction_ 
* dataset_.rows); + int testSampleSize = std::min(sampleSize / 10, 1000); + + Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_); + + // For a very small dataset, it makes no sense to build any fancy index, just + // use linear search + if (testSampleSize < 10) { + Logger::info("Choosing linear, dataset too small\n"); + return LinearIndexParams(); + } + + // We use a fraction of the original dataset to speedup the autotune algorithm + sampledDataset_ = random_sample(dataset_, sampleSize); + // We use a cross-validation approach, first we sample a testset from the dataset + testDataset_ = random_sample(sampledDataset_, testSampleSize, true); + + // We compute the ground truth using linear search + Logger::info("Computing ground truth... \n"); + gt_matches_ = Matrix(new int[testDataset_.rows], testDataset_.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(sampledDataset_, testDataset_, gt_matches_, 0, distance_); + t.stop(); + + CostData linear_cost; + linear_cost.searchTimeCost = (float)t.value; + linear_cost.buildTimeCost = 0; + linear_cost.memoryCost = 0; + linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR; + + costs.push_back(linear_cost); + + // Start parameter autotune process + Logger::info("Autotuning parameters...\n"); + + optimizeKMeans(costs); + optimizeKDTree(costs); + + float bestTimeCost = costs[0].searchTimeCost; + for (size_t i = 0; i < costs.size(); ++i) { + float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost; + if (timeCost < bestTimeCost) { + bestTimeCost = timeCost; + } + } + + float bestCost = costs[0].searchTimeCost / bestTimeCost; + IndexParams bestParams = costs[0].params; + if (bestTimeCost > 0) { + for (size_t i = 0; i < costs.size(); ++i) { + float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost + + memory_weight_ * 
costs[i].memoryCost; + if (crtCost < bestCost) { + bestCost = crtCost; + bestParams = costs[i].params; + } + } + } + + delete[] gt_matches_.data; + delete[] testDataset_.data; + delete[] sampledDataset_.data; + + return bestParams; + } + + + + /** + * Estimates the search time parameters needed to get the desired precision. + * Precondition: the index is built + * Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search. + */ + float estimateSearchParams(SearchParams& searchParams) + { + const int nn = 1; + const size_t SAMPLE_COUNT = 1000; + + assert(bestIndex_ != NULL); // must have a valid index + + float speedup = 0; + + int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT); + if (samples > 0) { + Matrix testDataset = random_sample(dataset_, samples); + + Logger::info("Computing ground truth\n"); + + // we need to compute the ground truth first + Matrix gt_matches(new int[testDataset.rows], testDataset.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(dataset_, testDataset, gt_matches, 1, distance_); + t.stop(); + float linear = (float)t.value; + + int checks; + Logger::info("Estimating number of checks\n"); + + float searchTime; + float cb_index; + if (bestIndex_->getType() == FLANN_INDEX_KMEANS) { + Logger::info("KMeans algorithm, estimating cluster border factor\n"); + KMeansIndex* kmeans = (KMeansIndex*)bestIndex_; + float bestSearchTime = -1; + float best_cb_index = -1; + int best_checks = -1; + for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) { + kmeans->set_cb_index(cb_index); + searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) { + bestSearchTime = searchTime; + best_cb_index = cb_index; + best_checks = checks; + } + } + searchTime = bestSearchTime; + cb_index = best_cb_index; + checks = best_checks; + + 
kmeans->set_cb_index(best_cb_index); + Logger::info("Optimum cb_index: %g\n", cb_index); + bestParams_["cb_index"] = cb_index; + } + else { + searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + } + + Logger::info("Required number of checks: %d \n", checks); + searchParams["checks"] = checks; + + speedup = linear / searchTime; + + delete[] gt_matches.data; + delete[] testDataset.data; + } + + return speedup; + } + +private: + NNIndex* bestIndex_; + + IndexParams bestParams_; + SearchParams bestSearchParams_; + + Matrix sampledDataset_; + Matrix testDataset_; + Matrix gt_matches_; + + float speedup_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** + * Index parameters + */ + float target_precision_; + float build_weight_; + float memory_weight_; + float sample_fraction_; + + Distance distance_; + + +}; +} + +#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */ diff --git a/OpenCV/Headers/flann/composite_index.h b/OpenCV/Headers/flann/composite_index.h new file mode 100644 index 0000000000..527ca1ad77 --- /dev/null +++ b/OpenCV/Headers/flann/composite_index.h @@ -0,0 +1,194 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_ +#define OPENCV_FLANN_COMPOSITE_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "kdtree_index.h" +#include "kmeans_index.h" + +namespace cvflann +{ + +/** + * Index parameters for the CompositeIndex. + */ +struct CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // number of randomized trees to use (for kdtree) + (*this)["trees"] = trees; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. 
Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * This index builds a kd-tree index and a k-means index and performs nearest + * neighbour search both indexes. This gives a slight boost in search performance + * as some of the neighbours that are missed by one index are found by the other. + */ +template +class CompositeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** + * Index constructor + * @param inputData dataset containing the points to index + * @param params Index parameters + * @param d Distance functor + * @return + */ + CompositeIndex(const Matrix& inputData, const IndexParams& params = CompositeIndexParams(), + Distance d = Distance()) : index_params_(params) + { + kdtree_index_ = new KDTreeIndex(inputData, params, d); + kmeans_index_ = new KMeansIndex(inputData, params, d); + + } + + CompositeIndex(const CompositeIndex&); + CompositeIndex& operator=(const CompositeIndex&); + + virtual ~CompositeIndex() + { + delete kdtree_index_; + delete kmeans_index_; + } + + /** + * @return The index type + */ + flann_algorithm_t getType() const + { + return FLANN_INDEX_COMPOSITE; + } + + /** + * @return Size of the index + */ + size_t size() const + { + return kdtree_index_->size(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t veclen() const + { + return kdtree_index_->veclen(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. 
+ */ + int usedMemory() const + { + return kmeans_index_->usedMemory() + kdtree_index_->usedMemory(); + } + + /** + * \brief Builds the index + */ + void buildIndex() + { + Logger::info("Building kmeans tree...\n"); + kmeans_index_->buildIndex(); + Logger::info("Building kdtree tree...\n"); + kdtree_index_->buildIndex(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + void saveIndex(FILE* stream) + { + kmeans_index_->saveIndex(stream); + kdtree_index_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + void loadIndex(FILE* stream) + { + kmeans_index_->loadIndex(stream); + kdtree_index_->loadIndex(stream); + } + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + kmeans_index_->findNeighbors(result, vec, searchParams); + kdtree_index_->findNeighbors(result, vec, searchParams); + } + +private: + /** The k-means index */ + KMeansIndex* kmeans_index_; + + /** The kd-tree index */ + KDTreeIndex* kdtree_index_; + + /** The index parameters */ + const IndexParams index_params_; +}; + +} + +#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_ diff --git a/OpenCV/Headers/flann/config.h b/OpenCV/Headers/flann/config.h new file mode 100644 index 0000000000..56832fd37f --- /dev/null +++ b/OpenCV/Headers/flann/config.h @@ -0,0 +1,38 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_CONFIG_H_ +#define OPENCV_FLANN_CONFIG_H_ + +#ifdef FLANN_VERSION_ +#undef FLANN_VERSION_ +#endif +#define FLANN_VERSION_ "1.6.10" + +#endif /* OPENCV_FLANN_CONFIG_H_ */ diff --git a/OpenCV/Headers/flann/defines.h b/OpenCV/Headers/flann/defines.h new file mode 100644 index 0000000000..13833b3c0b --- /dev/null +++ b/OpenCV/Headers/flann/defines.h @@ -0,0 +1,176 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_DEFINES_H_ +#define OPENCV_FLANN_DEFINES_H_ + +#include "config.h" + +#ifdef FLANN_EXPORT +#undef FLANN_EXPORT +#endif +#ifdef WIN32 +/* win32 dll export/import directives */ + #ifdef FLANN_EXPORTS + #define FLANN_EXPORT __declspec(dllexport) + #elif defined(FLANN_STATIC) + #define FLANN_EXPORT + #else + #define FLANN_EXPORT __declspec(dllimport) + #endif +#else +/* unix needs nothing */ + #define FLANN_EXPORT +#endif + + +#ifdef FLANN_DEPRECATED +#undef FLANN_DEPRECATED +#endif +#ifdef __GNUC__ +#define FLANN_DEPRECATED __attribute__ ((deprecated)) +#elif defined(_MSC_VER) +#define FLANN_DEPRECATED __declspec(deprecated) +#else +#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler") +#define FLANN_DEPRECATED +#endif + + +#undef FLANN_PLATFORM_32_BIT +#undef FLANN_PLATFORM_64_BIT +#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 +#define FLANN_PLATFORM_64_BIT +#else +#define FLANN_PLATFORM_32_BIT +#endif + + +#undef FLANN_ARRAY_LEN +#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0])) + +namespace cvflann { + +/* Nearest neighbour index algorithms */ +enum flann_algorithm_t +{ + FLANN_INDEX_LINEAR = 0, + FLANN_INDEX_KDTREE = 1, + FLANN_INDEX_KMEANS = 2, + FLANN_INDEX_COMPOSITE = 3, + FLANN_INDEX_KDTREE_SINGLE = 4, + FLANN_INDEX_HIERARCHICAL = 5, + FLANN_INDEX_LSH = 6, + FLANN_INDEX_SAVED = 254, + FLANN_INDEX_AUTOTUNED = 255, + + // deprecated constants, should use the FLANN_INDEX_* ones instead + LINEAR = 0, + KDTREE = 1, + KMEANS = 2, + COMPOSITE = 3, + KDTREE_SINGLE = 4, + SAVED = 254, + AUTOTUNED = 255 +}; + + + +enum flann_centers_init_t +{ + FLANN_CENTERS_RANDOM = 0, + FLANN_CENTERS_GONZALES = 1, + FLANN_CENTERS_KMEANSPP = 2, + + // deprecated constants, should use the FLANN_CENTERS_* ones instead + CENTERS_RANDOM = 0, + CENTERS_GONZALES = 1, + CENTERS_KMEANSPP = 2 +}; + +enum flann_log_level_t +{ + 
FLANN_LOG_NONE = 0, + FLANN_LOG_FATAL = 1, + FLANN_LOG_ERROR = 2, + FLANN_LOG_WARN = 3, + FLANN_LOG_INFO = 4 +}; + +enum flann_distance_t +{ + FLANN_DIST_EUCLIDEAN = 1, + FLANN_DIST_L2 = 1, + FLANN_DIST_MANHATTAN = 2, + FLANN_DIST_L1 = 2, + FLANN_DIST_MINKOWSKI = 3, + FLANN_DIST_MAX = 4, + FLANN_DIST_HIST_INTERSECT = 5, + FLANN_DIST_HELLINGER = 6, + FLANN_DIST_CHI_SQUARE = 7, + FLANN_DIST_CS = 7, + FLANN_DIST_KULLBACK_LEIBLER = 8, + FLANN_DIST_KL = 8, + FLANN_DIST_HAMMING = 9, + + // deprecated constants, should use the FLANN_DIST_* ones instead + EUCLIDEAN = 1, + MANHATTAN = 2, + MINKOWSKI = 3, + MAX_DIST = 4, + HIST_INTERSECT = 5, + HELLINGER = 6, + CS = 7, + KL = 8, + KULLBACK_LEIBLER = 8 +}; + +enum flann_datatype_t +{ + FLANN_INT8 = 0, + FLANN_INT16 = 1, + FLANN_INT32 = 2, + FLANN_INT64 = 3, + FLANN_UINT8 = 4, + FLANN_UINT16 = 5, + FLANN_UINT32 = 6, + FLANN_UINT64 = 7, + FLANN_FLOAT32 = 8, + FLANN_FLOAT64 = 9 +}; + +enum +{ + FLANN_CHECKS_UNLIMITED = -1, + FLANN_CHECKS_AUTOTUNED = -2 +}; + +} + +#endif /* OPENCV_FLANN_DEFINES_H_ */ diff --git a/OpenCV/Headers/flann/dist.h b/OpenCV/Headers/flann/dist.h new file mode 100644 index 0000000000..d2674305ce --- /dev/null +++ b/OpenCV/Headers/flann/dist.h @@ -0,0 +1,814 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DIST_H_ +#define OPENCV_FLANN_DIST_H_ + +#include +#include +#include +#ifdef _MSC_VER +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +#include "defines.h" + +#ifdef __ARM_NEON__ +#include "arm_neon.h" +#endif + +namespace cvflann +{ + +template +inline T abs(T x) { return (x<0) ? 
-x : x; } + +template<> +inline int abs(int x) { return ::abs(x); } + +template<> +inline float abs(float x) { return fabsf(x); } + +template<> +inline double abs(double x) { return fabs(x); } + +template +struct Accumulator { typedef T Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; + +#undef True +#undef False + +class True +{ +}; + +class False +{ +}; + + +/** + * Squared Euclidean distance functor. + * + * This is the simpler, unrolled version. This is preferable for + * very low dimensionality data (eg 3D points) + */ +template +struct L2_Simple +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff; + for(size_t i = 0; i < size; ++i ) { + diff = *a++ - *b++; + result += diff*diff; + } + return result; + } + + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + + +/** + * Squared Euclidean distance functor, optimized version + */ +template +struct L2 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the squared Euclidean distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. 
+ */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)(a[0] - b[0]); + diff1 = (ResultType)(a[1] - b[1]); + diff2 = (ResultType)(a[2] - b[2]); + diff3 = (ResultType)(a[3] - b[3]); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)(*a++ - *b++); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial euclidean distance, using just one dimension. This is used by the + * kd-tree when computing partial distances while traversing the tree. + * + * Squared root is omitted for efficiency. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + +/* + * Manhattan distance functor, optimized version + */ +template +struct L1 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Manhattan (L_1) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. 
*/ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return abs(a-b); + } +}; + + + +template +struct MinkowskiDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + int order; + + MinkowskiDistance(int order_) : order(order_) {} + + /** + * Compute the Minkowsky (L_p) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order); + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. 
*/ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += pow(diff0,order); + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return pow(static_cast(abs(a-b)),order); + } +}; + + + +template +struct MaxDistance +{ + typedef False is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the max distance (L_infinity) between two vectors. + * + * This distance is not a valid kdtree distance, it's not dimensionwise additive. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = abs(a[0] - b[0]); + diff1 = abs(a[1] - b[1]); + diff2 = abs(a[2] - b[2]); + diff3 = abs(a[3] - b[3]); + if (diff0>result) {result = diff0; } + if (diff1>result) {result = diff1; } + if (diff2>result) {result = diff2; } + if (diff3>result) {result = diff3; } + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = abs(*a++ - *b++); + result = (diff0>result) ? 
diff0 : result; + } + return result; + } + + /* This distance functor is not dimension-wise additive, which + * makes it an invalid kd-tree distance, not implementing the accum_dist method */ + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (int i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT2 +{ + typedef False is_kdtree_distance; + typedef False 
is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor (pop count between two binary vectors, i.e. 
xor them and count the number of bits set) + * That code was taken from brief.cpp in OpenCV + */ +template +struct Hamming +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + + typedef T ElementType; + typedef int ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = 0; +#ifdef __GNUC__ +#ifdef __ARM_NEON__ + { + uint32x4_t bits = vmovq_n_u32(0); + for (size_t i = 0; i < size; i += 16) { + uint8x16_t A_vec = vld1q_u8 (a + i); + uint8x16_t B_vec = vld1q_u8 (b + i); + uint8x16_t AxorB = veorq_u8 (A_vec, B_vec); + uint8x16_t bitsSet = vcntq_u8 (AxorB); + uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet); + uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8); + bits = vaddq_u32(bits, bitSet4); + } + uint64x2_t bitSet2 = vpaddlq_u32 (bits); + result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0); + result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2); + } +#else + { + //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll) + typedef unsigned long long pop_t; + const size_t modulo = size % sizeof(pop_t); + const pop_t* a2 = reinterpret_cast (a); + const pop_t* b2 = reinterpret_cast (b); + const pop_t* a2_end = a2 + (size / sizeof(pop_t)); + + for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2)); + + if (modulo) { + //in the case where size is not dividable by sizeof(size_t) + //need to mask off the bits at the end + pop_t a_final = 0, b_final = 0; + memcpy(&a_final, a2, modulo); + memcpy(&b_final, b2, modulo); + result += __builtin_popcountll(a_final ^ b_final); + } + } +#endif //NEON +#else + HammingLUT lut; + result = lut(reinterpret_cast (a), + reinterpret_cast (b), size * sizeof(pop_t)); +#endif + return result; + } +}; + +template +struct Hamming2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef T ElementType; + 
typedef int ResultType; + + /** This is popcount_3() from: + * http://en.wikipedia.org/wiki/Hamming_weight */ + unsigned int popcnt32(uint32_t n) const + { + n -= ((n >> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >> 2) & 0x33333333); + return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24; + } + +#ifdef FLANN_PLATFORM_64_BIT + unsigned int popcnt64(uint64_t n) const + { + n -= ((n >> 1) & 0x5555555555555555); + n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333); + return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56; + } +#endif + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + const uint64_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint64_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa ^ *pb); + ++pa; + ++pb; + } +#else + const uint32_t* pa = reinterpret_cast(a); + const uint32_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint32_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa ^ *pb); + ++pa; + ++pb; + } +#endif + return result; + } +}; + + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct HistIntersectionDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType min0, min1, min2, min3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. 
*/ + while (a < lastgroup) { + min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]); + min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]); + min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]); + min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]); + result += min0 + min1 + min2 + min3; + a += 4; + b += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + min0 = (ResultType)(*a < *b ? *a : *b); + result += min0; + ++a; + ++b; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return a +struct HellingerDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = sqrt(static_cast(a[0])) - sqrt(static_cast(b[0])); + diff1 = sqrt(static_cast(a[1])) - sqrt(static_cast(b[1])); + diff2 = sqrt(static_cast(a[2])) - sqrt(static_cast(b[2])); + diff3 = sqrt(static_cast(a[3])) - sqrt(static_cast(b[3])); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + } + while (a < last) { + diff0 = sqrt(static_cast(*a++)) - sqrt(static_cast(*b++)); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. 
+ */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return sqrt(static_cast(a)) - sqrt(static_cast(b)); + } +}; + + +template +struct ChiSquareDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the chi-square distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType sum, diff; + Iterator1 last = a + size; + + while (a < last) { + sum = (ResultType)(*a + *b); + if (sum>0) { + diff = (ResultType)(*a - *b); + result += diff*diff/sum; + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType sum, diff; + + sum = (ResultType)(a+b); + if (sum>0) { + diff = (ResultType)(a-b); + result = diff*diff/sum; + } + return result; + } +}; + + +template +struct KL_Divergence +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Kullback–Leibler divergence + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + Iterator1 last = a + size; + + while (a < last) { + if (* a != 0) { + ResultType ratio = (ResultType)(*a / *b); + if (ratio>0) { + result += *a * log(ratio); + } + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. 
+ */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType ratio = (ResultType)(a / b); + if (ratio>0) { + result = a * log(ratio); + } + return result; + } +}; + + + +/* + * This is a "zero iterator". It basically behaves like a zero filled + * array to all algorithms that use arrays as iterators (STL style). + * It's useful when there's a need to compute the distance between feature + * and origin it and allows for better compiler optimisation than using a + * zero-filled array. + */ +template +struct ZeroIterator +{ + + T operator*() + { + return 0; + } + + T operator[](int) + { + return 0; + } + + const ZeroIterator& operator ++() + { + return *this; + } + + ZeroIterator operator ++(int) + { + return *this; + } + + ZeroIterator& operator+=(int) + { + return *this; + } + +}; + +} + +#endif //OPENCV_FLANN_DIST_H_ diff --git a/OpenCV/Headers/flann/dummy.h b/OpenCV/Headers/flann/dummy.h new file mode 100644 index 0000000000..26bd3fa5dd --- /dev/null +++ b/OpenCV/Headers/flann/dummy.h @@ -0,0 +1,16 @@ + +#ifndef OPENCV_FLANN_DUMMY_H_ +#define OPENCV_FLANN_DUMMY_H_ + +namespace cvflann +{ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +__declspec(dllexport) +#endif +void dummyfunc(); + +} + + +#endif /* OPENCV_FLANN_DUMMY_H_ */ diff --git a/OpenCV/Headers/flann/dynamic_bitset.h b/OpenCV/Headers/flann/dynamic_bitset.h new file mode 100644 index 0000000000..bfd39cea48 --- /dev/null +++ b/OpenCV/Headers/flann/dynamic_bitset.h @@ -0,0 +1,159 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_ +#define OPENCV_FLANN_DYNAMIC_BITSET_H_ + +#ifndef FLANN_USE_BOOST +# define FLANN_USE_BOOST 0 +#endif +//#define FLANN_USE_BOOST 1 +#if FLANN_USE_BOOST +#include +typedef boost::dynamic_bitset<> DynamicBitset; +#else + +#include + +#include "dist.h" + +namespace cvflann { + +/** Class re-implementing the boost version of it + * This helps not depending on boost, it also does not do the bound checks + * and has a way to reset a block for speed + */ +class DynamicBitset +{ +public: + /** @param default constructor + */ + DynamicBitset() + { + } + + /** @param only constructor we use in our code + * @param the size of the bitset (in bits) + */ + DynamicBitset(size_t sz) + { + resize(sz); + reset(); + } + + /** Sets all the bits to 0 + */ + void clear() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief checks if the bitset is empty + * @return true if the bitset is empty + */ + bool empty() const + { + return bitset_.empty(); + } + + /** @param set all the bits to 0 + */ + void reset() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief set one bit to 0 + * @param + */ + void reset(size_t index) + { + bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_)); + } + + /** @brief sets a specific bit to 0, and more bits too + * This function is useful when resetting a given set of bits so that the + * whole bitset ends up being 0: if that's the case, we don't care about setting + * other bits to 0 + * @param + */ + void reset_block(size_t index) + { + bitset_[index / cell_bit_size_] = 0; + } + + /** @param resize the bitset so that it contains at least size bits + * @param size + */ + void resize(size_t sz) + { + size_ = 
sz; + bitset_.resize(sz / cell_bit_size_ + 1); + } + + /** @param set a bit to true + * @param index the index of the bit to set to 1 + */ + void set(size_t index) + { + bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_); + } + + /** @param gives the number of contained bits + */ + size_t size() const + { + return size_; + } + + /** @param check if a bit is set + * @param index the index of the bit to check + * @return true if the bit is set + */ + bool test(size_t index) const + { + return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) != 0; + } + +private: + std::vector bitset_; + size_t size_; + static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t); +}; + +} // namespace cvflann + +#endif + +#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_ diff --git a/OpenCV/Headers/flann/flann.hpp b/OpenCV/Headers/flann/flann.hpp new file mode 100644 index 0000000000..d053488ed4 --- /dev/null +++ b/OpenCV/Headers/flann/flann.hpp @@ -0,0 +1,427 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef _OPENCV_FLANN_HPP_ +#define _OPENCV_FLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/types_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/flann/flann_base.hpp" +#include "opencv2/flann/miniflann.hpp" + +namespace cvflann +{ + CV_EXPORTS flann_distance_t flann_distance_type(); + FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order); +} + + +namespace cv +{ +namespace flann +{ + +template struct CvType {}; +template <> struct CvType { static int type() { return CV_8U; } }; +template <> struct CvType { static int type() { return CV_8S; } }; +template <> struct CvType { static int type() { return CV_16U; } }; +template <> struct CvType { static int type() { return CV_16S; } }; +template <> struct CvType { static int type() { return CV_32S; } }; +template <> struct CvType { static int type() { return CV_32F; } }; +template <> struct CvType { static int type() { return CV_64F; } }; + + +// bring the flann parameters into this namespace +using ::cvflann::get_param; +using ::cvflann::print_params; + +// bring the flann distances into this namespace +using ::cvflann::L2_Simple; +using ::cvflann::L2; +using ::cvflann::L1; +using ::cvflann::MinkowskiDistance; +using ::cvflann::MaxDistance; +using ::cvflann::HammingLUT; +using ::cvflann::Hamming; +using ::cvflann::Hamming2; +using ::cvflann::HistIntersectionDistance; +using ::cvflann::HellingerDistance; +using ::cvflann::ChiSquareDistance; +using ::cvflann::KL_Divergence; + + + +template +class GenericIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance()); + + ~GenericIndex(); + + void knnSearch(const vector& query, vector& indices, + vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, 
const ::cvflann::SearchParams& params); + + int radiusSearch(const vector& query, vector& indices, + vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, + DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) { nnIndex->save(filename); } + + int veclen() const { return nnIndex->veclen(); } + + int size() const { return nnIndex->size(); } + + ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); } + +private: + ::cvflann::Index* nnIndex; +}; + + +#define FLANN_DISTANCE_CHECK \ + if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \ + printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\ + "the distance using cvflann::set_distance_type. This is no longer working as expected "\ + "(cv::flann::Index always uses L2). 
You should create the index templated on the distance, "\ + "for example for L1 distance use: GenericIndex< L1 > \n"); \ + } + + +template +GenericIndex::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance) +{ + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + nnIndex = new ::cvflann::Index(m_dataset, params, distance); + + FLANN_DISTANCE_CHECK + + nnIndex->buildIndex(); +} + +template +GenericIndex::~GenericIndex() +{ + delete nnIndex; +} + +template +void GenericIndex::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void GenericIndex::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int GenericIndex::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix 
m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int GenericIndex::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +/** + * @deprecated Use GenericIndex class instead + */ +template +class +#ifndef _MSC_VER + FLANN_DEPRECATED +#endif + Index_ { +public: + typedef typename L2::ElementType ElementType; + typedef typename L2::ResultType DistanceType; + + Index_(const Mat& features, const ::cvflann::IndexParams& params); + + ~Index_(); + + void knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params); + + int radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) + { + if (nnIndex_L1) nnIndex_L1->save(filename); + if (nnIndex_L2) nnIndex_L2->save(filename); + } + + int veclen() 
const + { + if (nnIndex_L1) return nnIndex_L1->veclen(); + if (nnIndex_L2) return nnIndex_L2->veclen(); + } + + int size() const + { + if (nnIndex_L1) return nnIndex_L1->size(); + if (nnIndex_L2) return nnIndex_L2->size(); + } + + ::cvflann::IndexParams getParameters() + { + if (nnIndex_L1) return nnIndex_L1->getParameters(); + if (nnIndex_L2) return nnIndex_L2->getParameters(); + + } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() + { + if (nnIndex_L1) return nnIndex_L1->getIndexParameters(); + if (nnIndex_L2) return nnIndex_L2->getIndexParameters(); + } + +private: + // providing backwards compatibility for L2 and L1 distances (most common) + ::cvflann::Index< L2 >* nnIndex_L2; + ::cvflann::Index< L1 >* nnIndex_L1; +}; + +#ifdef _MSC_VER +template +class FLANN_DEPRECATED Index_; +#endif + +template +Index_::Index_(const Mat& dataset, const ::cvflann::IndexParams& params) +{ + printf("[WARNING] The cv::flann::Index_ class is deperecated, use cv::flann::GenericIndex instead\n"); + + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + nnIndex_L1 = NULL; + nnIndex_L2 = new ::cvflann::Index< L2 >(m_dataset, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + nnIndex_L1 = new ::cvflann::Index< L1 >(m_dataset, params); + nnIndex_L2 = NULL; + } + else { + printf("[ERROR] cv::flann::Index_ only provides backwards compatibility for the L1 and L2 distances. 
" + "For other distance types you must use cv::flann::GenericIndex\n"); + CV_Assert(0); + } + if (nnIndex_L1) nnIndex_L1->buildIndex(); + if (nnIndex_L2) nnIndex_L2->buildIndex(); +} + +template +Index_::~Index_() +{ + if (nnIndex_L1) delete nnIndex_L1; + if (nnIndex_L2) delete nnIndex_L2; +} + +template +void Index_::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void Index_::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int Index_::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) 
return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int Index_::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + + +template +int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params, + Distance d = Distance()) +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + CV_Assert(features.type() == CvType::type()); + CV_Assert(features.isContinuous()); + ::cvflann::Matrix m_features((ElementType*)features.ptr(0), features.rows, features.cols); + + CV_Assert(centers.type() == CvType::type()); + CV_Assert(centers.isContinuous()); + ::cvflann::Matrix m_centers((DistanceType*)centers.ptr(0), centers.rows, centers.cols); + + return ::cvflann::hierarchicalClustering(m_features, m_centers, params, d); +} + + +template +FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params) +{ + printf("[WARNING] cv::flann::hierarchicalClustering is deprecated, use " + "cv::flann::hierarchicalClustering instead\n"); + + if ( 
::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + return hierarchicalClustering< L2 >(features, centers, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + return hierarchicalClustering< L1 >(features, centers, params); + } + else { + printf("[ERROR] cv::flann::hierarchicalClustering only provides backwards " + "compatibility for the L1 and L2 distances. " + "For other distance types you must use cv::flann::hierarchicalClustering\n"); + CV_Assert(0); + } +} + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/OpenCV/Headers/flann/flann_base.hpp b/OpenCV/Headers/flann/flann_base.hpp new file mode 100644 index 0000000000..b5ba7d79e2 --- /dev/null +++ b/OpenCV/Headers/flann/flann_base.hpp @@ -0,0 +1,291 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_BASE_HPP_ +#define OPENCV_FLANN_BASE_HPP_ + +#include +#include +#include +#include + +#include "general.h" +#include "matrix.h" +#include "params.h" +#include "saving.h" + +#include "all_indices.h" + +namespace cvflann +{ + +/** + * Sets the log level used for all flann functions + * @param level Verbosity level + */ +inline void log_verbosity(int level) +{ + if (level >= 0) { + Logger::setLevel(level); + } +} + +/** + * (Deprecated) Index parameters for creating a saved index. 
+ */ +struct SavedIndexParams : public IndexParams +{ + SavedIndexParams(std::string filename) + { + (* this)["algorithm"] = FLANN_INDEX_SAVED; + (*this)["filename"] = filename; + } +}; + + +template +NNIndex* load_saved_index(const Matrix& dataset, const std::string& filename, Distance distance) +{ + typedef typename Distance::ElementType ElementType; + + FILE* fin = fopen(filename.c_str(), "rb"); + if (fin == NULL) { + return NULL; + } + IndexHeader header = load_header(fin); + if (header.data_type != Datatype::type()) { + throw FLANNException("Datatype of saved index is different than of the one to be created."); + } + if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) { + throw FLANNException("The index saved belongs to a different dataset"); + } + + IndexParams params; + params["algorithm"] = header.index_type; + NNIndex* nnIndex = create_index_by_type(dataset, params, distance); + nnIndex->loadIndex(fin); + fclose(fin); + + return nnIndex; +} + + +template +class Index : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + Index(const Matrix& features, const IndexParams& params, Distance distance = Distance() ) + : index_params_(params) + { + flann_algorithm_t index_type = get_param(params,"algorithm"); + loaded_ = false; + + if (index_type == FLANN_INDEX_SAVED) { + nnIndex_ = load_saved_index(features, get_param(params,"filename"), distance); + loaded_ = true; + } + else { + nnIndex_ = create_index_by_type(features, params, distance); + } + } + + ~Index() + { + delete nnIndex_; + } + + /** + * Builds the index. 
+ */ + void buildIndex() + { + if (!loaded_) { + nnIndex_->buildIndex(); + } + } + + void save(std::string filename) + { + FILE* fout = fopen(filename.c_str(), "wb"); + if (fout == NULL) { + throw FLANNException("Cannot open file"); + } + save_header(fout, *nnIndex_); + saveIndex(fout); + fclose(fout); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) + { + nnIndex_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) + { + nnIndex_->loadIndex(stream); + } + + /** + * \returns number of features in this index. + */ + size_t veclen() const + { + return nnIndex_->veclen(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t size() const + { + return nnIndex_->size(); + } + + /** + * \returns The index type (kdtree, kmeans,...) + */ + flann_algorithm_t getType() const + { + return nnIndex_->getType(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. 
+ */ + virtual int usedMemory() const + { + return nnIndex_->usedMemory(); + } + + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return nnIndex_->getParameters(); + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + nnIndex_->knnSearch(queries, indices, dists, knn, params); + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + return nnIndex_->radiusSearch(query, indices, dists, radius, params); + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + nnIndex_->findNeighbors(result, vec, searchParams); + } + + /** + * \brief Returns actual index + */ + FLANN_DEPRECATED NNIndex* getIndex() + { + return nnIndex_; + } + + /** + * \brief Returns index parameters. + * \deprecated use getParameters() instead. 
+ */ + FLANN_DEPRECATED const IndexParams* getIndexParameters() + { + return &index_params_; + } + +private: + /** Pointer to actual index class */ + NNIndex* nnIndex_; + /** Indices if the index was loaded from a file */ + bool loaded_; + /** Parameters passed to the index */ + IndexParams index_params_; +}; + +/** + * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the + * the clustering tree to return a flat clustering. + * @param[in] points Points to be clustered + * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the + * number of clusters requested. + * @param params Clustering parameters (The same as for cvflann::KMeansIndex) + * @param d Distance to be used for clustering (eg: cvflann::L2) + * @return number of clusters computed (can be different than clusters.rows and is the highest number + * of the form (branching-1)*K+1 smaller than clusters.rows). + */ +template +int hierarchicalClustering(const Matrix& points, Matrix& centers, + const KMeansIndexParams& params, Distance d = Distance()) +{ + KMeansIndex kmeans(points, params, d); + kmeans.buildIndex(); + + int clusterNum = kmeans.getClusterCenters(centers); + return clusterNum; +} + +} +#endif /* OPENCV_FLANN_BASE_HPP_ */ diff --git a/OpenCV/Headers/flann/general.h b/OpenCV/Headers/flann/general.h new file mode 100644 index 0000000000..87e7e2f288 --- /dev/null +++ b/OpenCV/Headers/flann/general.h @@ -0,0 +1,52 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GENERAL_H_ +#define OPENCV_FLANN_GENERAL_H_ + +#include "defines.h" +#include +#include + +namespace cvflann +{ + +class FLANNException : public std::runtime_error +{ +public: + FLANNException(const char* message) : std::runtime_error(message) { } + + FLANNException(const std::string& message) : std::runtime_error(message) { } +}; + +} + + +#endif /* OPENCV_FLANN_GENERAL_H_ */ diff --git a/OpenCV/Headers/flann/ground_truth.h b/OpenCV/Headers/flann/ground_truth.h new file mode 100644 index 0000000000..69d978ba07 --- /dev/null +++ b/OpenCV/Headers/flann/ground_truth.h @@ -0,0 +1,95 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. 
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_GROUND_TRUTH_H_ +#define OPENCV_FLANN_GROUND_TRUTH_H_ + +#include "dist.h" +#include "matrix.h" + + +namespace cvflann +{ + +template +void find_nearest(const Matrix& dataset, typename Distance::ElementType* query, int* matches, int nn, + int skip = 0, Distance distance = Distance()) +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + int n = nn + skip; + + std::vector match(n); + std::vector dists(n); + + dists[0] = distance(dataset[0], query, dataset.cols); + match[0] = 0; + int dcnt = 1; + + for (size_t i=1; i=1 && dists[j] +void compute_ground_truth(const Matrix& dataset, const Matrix& testset, Matrix& matches, + int skip=0, Distance d = Distance()) +{ + for (size_t i=0; i(dataset, testset[i], matches[i], (int)matches.cols, skip, d); + } +} + + +} + +#endif //OPENCV_FLANN_GROUND_TRUTH_H_ diff --git a/OpenCV/Headers/flann/hdf5.h b/OpenCV/Headers/flann/hdf5.h new file mode 100644 index 0000000000..ef3e999738 --- /dev/null +++ b/OpenCV/Headers/flann/hdf5.h @@ -0,0 +1,231 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_HDF5_H_ +#define OPENCV_FLANN_HDF5_H_ + +#include + +#include "matrix.h" + + +namespace cvflann +{ + +namespace +{ + +template +hid_t get_hdf5_type() +{ + throw FLANNException("Unsupported type for IO operations"); +} + +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_CHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UCHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_SHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_USHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_INT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UINT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_LONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_ULONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_FLOAT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_DOUBLE; } +} + + +#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y)); + +template +void save_to_file(const cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + +#if H5Eset_auto_vers == 2 + H5Eset_auto( H5E_DEFAULT, NULL, NULL ); +#else + H5Eset_auto( NULL, 
NULL ); +#endif + + herr_t status; + hid_t file_id; + file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + if (file_id < 0) { + file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + CHECK_ERROR(file_id,"Error creating hdf5 file."); + + hsize_t dimsf[2]; // dataset dimensions + dimsf[0] = dataset.rows; + dimsf[1] = dataset.cols; + + hid_t space_id = H5Screate_simple(2, dimsf, NULL); + hid_t memspace_id = H5Screate_simple(2, dimsf, NULL); + + hid_t dataset_id; +#if H5Dcreate_vers == 2 + dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); +#else + dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT); +#endif + + if (dataset_id<0) { +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + } + CHECK_ERROR(dataset_id,"Error creating or opening dataset in file."); + + status = H5Dwrite(dataset_id, get_hdf5_type(), memspace_id, space_id, H5P_DEFAULT, dataset.data ); + CHECK_ERROR(status, "Error writing to dataset"); + + H5Sclose(memspace_id); + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); + +} + + +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + herr_t status; + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + + hsize_t dims_out[2]; + H5Sget_simple_extent_dims(space_id, dims_out, NULL); + + dataset = cvflann::Matrix(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]); + + status = H5Dread(dataset_id, 
get_hdf5_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]); + CHECK_ERROR(status, "Error reading dataset"); + + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} + + +#ifdef HAVE_MPI + +namespace mpi +{ +/** + * Loads a the hyperslice corresponding to this processor from a hdf5 file. + * @param flann_dataset Dataset where the data is loaded + * @param filename HDF5 file name + * @param name Name of dataset inside file + */ +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + int mpi_size, mpi_rank; + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + herr_t status; + + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(plist_id, comm, info); + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + H5Pclose(plist_id); + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + hsize_t dims[2]; + H5Sget_simple_extent_dims(space_id, dims, NULL); + + hsize_t count[2]; + hsize_t offset[2]; + + hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 
0 : 1); + hsize_t cnt = (mpi_rank(), memspace_id, space_id, plist_id, dataset.data); + CHECK_ERROR(status, "Error reading dataset"); + + H5Pclose(plist_id); + H5Sclose(space_id); + H5Sclose(memspace_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} +} +#endif // HAVE_MPI +} // namespace cvflann::mpi + +#endif /* OPENCV_FLANN_HDF5_H_ */ diff --git a/OpenCV/Headers/flann/heap.h b/OpenCV/Headers/flann/heap.h new file mode 100644 index 0000000000..92a6ea614b --- /dev/null +++ b/OpenCV/Headers/flann/heap.h @@ -0,0 +1,165 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HEAP_H_ +#define OPENCV_FLANN_HEAP_H_ + +#include +#include + +namespace cvflann +{ + +/** + * Priority Queue Implementation + * + * The priority queue is implemented with a heap. A heap is a complete + * (full) binary tree in which each parent is less than both of its + * children, but the order of the children is unspecified. + */ +template +class Heap +{ + + /** + * Storage array for the heap. + * Type T must be comparable. + */ + std::vector heap; + int length; + + /** + * Number of element in the heap + */ + int count; + + + +public: + /** + * Constructor. + * + * Params: + * sz = heap size + */ + + Heap(int sz) + { + length = sz; + heap.reserve(length); + count = 0; + } + + /** + * + * Returns: heap size + */ + int size() + { + return count; + } + + /** + * Tests if the heap is empty + * + * Returns: true is heap empty, false otherwise + */ + bool empty() + { + return size()==0; + } + + /** + * Clears the heap. + */ + void clear() + { + heap.clear(); + count = 0; + } + + struct CompareT + { + bool operator()(const T& t_1, const T& t_2) const + { + return t_2 < t_1; + } + }; + + /** + * Insert a new element in the heap. + * + * We select the next empty leaf node, and then keep moving any larger + * parents down until the right location is found to store this element. 
+ * + * Params: + * value = the new element to be inserted in the heap + */ + void insert(T value) + { + /* If heap is full, then return without adding this element. */ + if (count == length) { + return; + } + + heap.push_back(value); + static CompareT compareT; + std::push_heap(heap.begin(), heap.end(), compareT); + ++count; + } + + + + /** + * Returns the node of minimum value from the heap (top of the heap). + * + * Params: + * value = out parameter used to return the min element + * Returns: false if heap empty + */ + bool popMin(T& value) + { + if (count == 0) { + return false; + } + + value = heap[0]; + static CompareT compareT; + std::pop_heap(heap.begin(), heap.end(), compareT); + heap.pop_back(); + --count; + + return true; /* Return old last node. */ + } +}; + +} + +#endif //OPENCV_FLANN_HEAP_H_ diff --git a/OpenCV/Headers/flann/hierarchical_clustering_index.h b/OpenCV/Headers/flann/hierarchical_clustering_index.h new file mode 100644 index 0000000000..ce2d622450 --- /dev/null +++ b/OpenCV/Headers/flann/hierarchical_clustering_index.h @@ -0,0 +1,717 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ +#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, + int trees = 4, int leaf_size = 100) + { + (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL; + // The branching factor used in the hierarchical clustering + (*this)["branching"] = branching; + // Algorithm used for picking the initial cluster centers + (*this)["centers_init"] = centers_init; + // number of parallel trees to build + (*this)["trees"] = trees; + // maximum leaf size + (*this)["leaf_size"] = leaf_size; + } +}; + + +/** + * Hierarchical index + * + * Contains a tree constructed through a hierarchical clustering + * and other information for indexing a set of points for 
nearest-neighbour matching. + */ +template +class HierarchicalClusteringIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +private: + + + typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = dsindices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = dsindices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. 
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = dsindices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = 0; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols), closestDistSq[i] ); + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = dsindices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols), closestDistSq[i] ); + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + +public: + + + /** + * Index constructor + * + * Params: 
+ * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + HierarchicalClusteringIndex(const Matrix& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(), + Distance d = Distance()) + : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d) + { + memoryCounter = 0; + + size_ = dataset.rows; + veclen_ = dataset.cols; + + branching_ = get_param(params,"branching",32); + centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM); + trees_ = get_param(params,"trees",4); + leaf_size_ = get_param(params,"leaf_size",100); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + + trees_ = get_param(params,"trees",4); + root = new NodePtr[trees_]; + indices = new int*[trees_]; + } + + HierarchicalClusteringIndex(const HierarchicalClusteringIndex&); + HierarchicalClusteringIndex& operator=(const HierarchicalClusteringIndex&); + + /** + * Index destructor. + * + * Release the memory used by the index. + */ + virtual ~HierarchicalClusteringIndex() + { + if (indices!=NULL) { + delete[] indices; + } + } + + /** + * Returns size of index. + */ + size_t size() const + { + return size_; + } + + /** + * Returns the length of an index feature. 
+ */ + size_t veclen() const + { + return veclen_; + } + + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return pool.usedMemory+pool.wastedMemory+memoryCounter; + } + + /** + * Builds the index + */ + void buildIndex() + { + if (branching_<2) { + throw FLANNException("Branching factor must be at least 2"); + } + for (int i=0; i(); + computeClustering(root[i], indices[i], (int)size_, branching_,0); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_HIERARCHICAL; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, trees_); + save_value(stream, centers_init_); + save_value(stream, leaf_size_); + save_value(stream, memoryCounter); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + std::vector checked(size_,false); + int checks = 0; + for (int i=0; ipopMin(branch) && (checks BranchSt; + + + + void save_tree(FILE* stream, NodePtr node, int num) + { + save_value(stream, *node); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices[num]); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i], num); + } + } + } + + + void load_tree(FILE* stream, NodePtr& node, int num) + { + node = pool.allocate(); + load_value(stream, *node); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices[num] + indices_offset; + } + else { + node->childs = pool.allocate(branching_); + for(int i=0; ichilds[i], num); + } + } + } + + + + + void computeLabels(int* dsindices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost) + { + cost = 0; + for (int i=0; inew_dist) { + labels[i] = j; + dist = new_dist; + } + } 
+ cost += dist; + } + } + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < leaf_size_) { // leaf node + node->indices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + std::vector centers(branching); + std::vector labels(indices_length); + + int centers_length; + (this->*chooseCenters)(branching, dsindices, indices_length, ¢ers[0], centers_length); + + if (centers_lengthindices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + + // assign points to clusters + DistanceType cost; + computeLabels(dsindices, indices_length, ¢ers[0], centers_length, &labels[0], cost); + + node->childs = pool.allocate(branching); + int start = 0; + int end = start; + for (int i=0; ichilds[i] = pool.allocate(); + node->childs[i]->pivot = centers[i]; + node->childs[i]->indices = NULL; + computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1); + start=end; + } + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(NodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap, std::vector& checked) + { + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + for (int i=0; isize; ++i) { + int index = node->indices[i]; + if (!checked[index]) { + DistanceType dist = distance(dataset[index], vec, veclen_); + result.addPoint(dist, index); + checked[index] = true; + ++checks; + } + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int best_index = 0; + domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_); + for (int i=1; ichilds[i]->pivot], veclen_); + if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + delete[] domain_distances; + findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked); + } + } + +private: + + + /** + * The dataset used by this index + */ + const Matrix dataset; + + /** + * Parameters used by this index + */ + IndexParams params; + + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + NodePtr* root; + + /** + * Array of indices to vectors in the dataset. + */ + int** indices; + + + /** + * The distance + */ + Distance distance; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + /** + * Memory occupied by the index. 
+ */ + int memoryCounter; + + /** index parameters */ + int branching_; + int trees_; + flann_centers_init_t centers_init_; + int leaf_size_; + + +}; + +} + +#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */ diff --git a/OpenCV/Headers/flann/index_testing.h b/OpenCV/Headers/flann/index_testing.h new file mode 100644 index 0000000000..d76400409a --- /dev/null +++ b/OpenCV/Headers/flann/index_testing.h @@ -0,0 +1,318 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_INDEX_TESTING_H_ +#define OPENCV_FLANN_INDEX_TESTING_H_ + +#include +#include +#include + +#include "matrix.h" +#include "nn_index.h" +#include "result_set.h" +#include "logger.h" +#include "timer.h" + + +namespace cvflann +{ + +inline int countCorrectMatches(int* neighbors, int* groundTruth, int n) +{ + int count = 0; + for (int i=0; i +typename Distance::ResultType computeDistanceRaport(const Matrix& inputData, typename Distance::ElementType* target, + int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance) +{ + typedef typename Distance::ResultType DistanceType; + + DistanceType ret = 0; + for (int i=0; i +float search_with_ground_truth(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, int nn, int checks, + float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches) +{ + typedef typename Distance::ResultType DistanceType; + + if (matches.cols resultSet(nn+skipMatches); + SearchParams searchParams(checks); + + std::vector indices(nn+skipMatches); + std::vector dists(nn+skipMatches); + int* neighbors = &indices[skipMatches]; + + int correct = 0; + DistanceType distR = 0; + StartStopTimer t; + int repeats = 0; + while (t.value<0.2) { + repeats++; + t.start(); + correct = 0; + distR = 0; + for (size_t i = 0; i < testData.rows; i++) { + resultSet.init(&indices[0], &dists[0]); + index.findNeighbors(resultSet, testData[i], searchParams); + + correct += countCorrectMatches(neighbors,matches[i], nn); + distR += computeDistanceRaport(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance); + } + t.stop(); + } + time = float(t.value/repeats); + + float precicion = (float)correct/(nn*testData.rows); + + dist = distR/(testData.rows*nn); + + Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n", + checks, precicion, time, 1000.0 * time / testData.rows, dist); + + 
return precicion; +} + + +template +float test_index_checks(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + float time = 0; + DistanceType dist = 0; + precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches); + + return time; +} + +template +float test_index_precision(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + const float SEARCH_EPS = 0.001f; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + int c1 = 1; + //float p1; + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + if (p2>precision) { + Logger::info("Got as close as I can\n"); + checks = c2; + return time; + } + + while (p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +void test_index_precisions(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float* precisions, int precisions_length, const 
Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0) +{ + typedef typename Distance::ResultType DistanceType; + + const float SEARCH_EPS = 0.001; + + // make sure precisions array is sorted + std::sort(precisions, precisions+precisions_length); + + int pindex = 0; + float precision = precisions[pindex]; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + + int c1 = 1; + float p1; + + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + // if precision for 1 run down the tree is already + // better then some of the requested precisions, then + // skip those + while (precisions[pindex] 0)&&(time > maxTime)&&(p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dynamic_bitset.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees = 4) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE; + (*this)["trees"] = trees; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. 
+ */ +template +class KDTreeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeIndex(const Matrix& inputData, const IndexParams& params = KDTreeIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + trees_ = get_param(index_params_,"trees",4); + tree_roots_ = new NodePtr[trees_]; + + // Create a permutable array of indices to the input vectors. + vind_.resize(size_); + for (size_t i = 0; i < size_; ++i) { + vind_[i] = int(i); + } + + mean_ = new DistanceType[veclen_]; + var_ = new DistanceType[veclen_]; + } + + + KDTreeIndex(const KDTreeIndex&); + KDTreeIndex& operator=(const KDTreeIndex&); + + /** + * Standard destructor + */ + ~KDTreeIndex() + { + if (tree_roots_!=NULL) { + delete[] tree_roots_; + } + delete[] mean_; + delete[] var_; + } + + /** + * Builds the index + */ + void buildIndex() + { + /* Construct the randomized trees. */ + for (int i = 0; i < trees_; i++) { + /* Randomize the order of vectors to allow for unbiased sampling. 
*/ + std::random_shuffle(vind_.begin(), vind_.end()); + tree_roots_[i] = divideTree(&vind_[0], int(size_) ); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_KDTREE; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, trees_); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + int maxChecks = get_param(searchParams,"checks", 32); + float epsError = 1+get_param(searchParams,"eps",0.0f); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + getExactNeighbors(result, vec, epsError); + } + else { + getNeighbors(result, vec, maxChecks, epsError); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divval; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. 
+ * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int* ind, int count) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( count == 1) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. */ + node->divfeat = *ind; /* Store index of this vec. */ + } + else { + int idx; + int cutfeat; + DistanceType cutval; + meanSplit(ind, count, idx, cutfeat, cutval); + + node->divfeat = cutfeat; + node->divval = cutval; + node->child1 = divideTree(ind, idx); + node->child2 = divideTree(ind+idx, count-idx); + } + + return node; + } + + + /** + * Choose which feature to use in order to subdivide this set of vectors. + * Make a random choice among those with the highest variance, and use + * its variance as the threshold value. + */ + void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval) + { + memset(mean_,0,veclen_*sizeof(DistanceType)); + memset(var_,0,veclen_*sizeof(DistanceType)); + + /* Compute mean values. Only the first SAMPLE_MEAN values need to be + sampled to get a good estimate. + */ + int cnt = std::min((int)SAMPLE_MEAN+1, count); + for (int j = 0; j < cnt; ++j) { + ElementType* v = dataset_[ind[j]]; + for (size_t k=0; kcount/2) index = lim1; + else if (lim2 v[topind[num-1]])) { + /* Put this element at end of topind. */ + if (num < RAND_DIM) { + topind[num++] = i; /* Add to list. */ + } + else { + topind[num-1] = i; /* Replace last element. */ + } + /* Bubble end value down to right location by repeated swapping. */ + int j = num - 1; + while (j > 0 && v[topind[j]] > v[topind[j-1]]) { + std::swap(topind[j], topind[j-1]); + --j; + } + } + } + /* Select a random integer in range [0,num-1], and return that index. 
*/ + int rnd = rand_int(num); + return (int)topind[rnd]; + } + + + /** + * Subdivide the list of points by a plane perpendicular on axe corresponding + * to the 'cutfeat' dimension at 'cutval' position. + * + * On return: + * dataset[ind[0..lim1-1]][cutfeat]cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + /** + * Performs an exact nearest neighbor search. The exact search performs a full + * traversal of the tree. + */ + void getExactNeighbors(ResultSet& result, const ElementType* vec, float epsError) + { + // checkID -= 1; /* Set a different unique ID for each search. */ + + if (trees_ > 1) { + fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search"); + } + if (trees_>0) { + searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError); + } + assert(result.full()); + } + + /** + * Performs the approximate nearest-neighbor search. The search is approximate + * because the tree traversal is abandoned after a given number of descends in + * the tree. + */ + void getNeighbors(ResultSet& result, const ElementType* vec, int maxCheck, float epsError) + { + int i; + BranchSt branch; + + int checkCount = 0; + Heap* heap = new Heap((int)size_); + DynamicBitset checked(size_); + + /* Search once through each tree down to root. 
*/ + for (i = 0; i < trees_; ++i) { + searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); + } + + /* Keep searching other branches from heap until finished. */ + while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { + searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); + } + + delete heap; + + assert(result.full()); + } + + + /** + * Search starting from a given node of the tree. Based on any mismatches at + * higher levels, all exemplars below this level must have a distance of + * at least "mindistsq". + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck, + float epsError, Heap* heap, DynamicBitset& checked) + { + if (result_set.worstDist()child1 == NULL)&&(node->child2 == NULL)) { + /* Do not check same node more than once when searching multiple trees. + Once a vector is checked, we set its location in vind to the + current checkID. + */ + int index = node->divfeat; + if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return; + checked.set(index); + checkCount++; + + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. 
+ */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + // if (2 * checkCount < maxCheck || !result.full()) { + if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) { + heap->insert( BranchSt(otherChild, new_distsq) ); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked); + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevelExact(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + int index = node->divfeat; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + + /* Call recursively to search next level down. 
*/ + searchLevelExact(result_set, vec, bestChild, mindist, epsError); + + if (new_distsq*epsError<=result_set.worstDist()) { + searchLevelExact(result_set, vec, otherChild, new_distsq, epsError); + } + } + + +private: + + enum + { + /** + * To improve efficiency, only SAMPLE_MEAN random values are used to + * compute the mean and variance at each level when building a tree. + * A value of 100 seems to perform as well as using all values. + */ + SAMPLE_MEAN = 100, + /** + * Top random dimensions to consider + * + * When creating random trees, the dimension on which to subdivide is + * selected at random from among the top RAND_DIM dimensions with the + * highest variance. A value of 5 works well. + */ + RAND_DIM=5 + }; + + + /** + * Number of randomized trees that are used + */ + int trees_; + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + size_t size_; + size_t veclen_; + + + DistanceType* mean_; + DistanceType* var_; + + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr* tree_roots_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; + + +}; // class KDTreeForest + +} + +#endif //OPENCV_FLANN_KDTREE_INDEX_H_ diff --git a/OpenCV/Headers/flann/kdtree_single_index.h b/OpenCV/Headers/flann/kdtree_single_index.h new file mode 100644 index 0000000000..30488ad567 --- /dev/null +++ b/OpenCV/Headers/flann/kdtree_single_index.h @@ -0,0 +1,634 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ +#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ + +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct KDTreeSingleIndexParams : public IndexParams +{ + KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE; + (*this)["leaf_max_size"] = leaf_max_size; + (*this)["reorder"] = reorder; + (*this)["dim"] = dim; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeSingleIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeSingleIndex(const Matrix& inputData, const IndexParams& params = KDTreeSingleIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + dim_ = dataset_.cols; + int dim_param = get_param(params,"dim",-1); + if (dim_param>0) dim_ = dim_param; + leaf_max_size_ = get_param(params,"leaf_max_size",10); + reorder_ = get_param(params,"reorder",true); + + // Create a permutable array of indices to the input vectors. 
+ vind_.resize(size_); + for (size_t i = 0; i < size_; i++) { + vind_[i] = (int)i; + } + } + + KDTreeSingleIndex(const KDTreeSingleIndex&); + KDTreeSingleIndex& operator=(const KDTreeSingleIndex&); + + /** + * Standard destructor + */ + ~KDTreeSingleIndex() + { + if (reorder_) delete[] data_.data; + } + + /** + * Builds the index + */ + void buildIndex() + { + computeBoundingBox(root_bbox_); + root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree + + if (reorder_) { + delete[] data_.data; + data_ = cvflann::Matrix(new ElementType[size_*dim_], size_, dim_); + for (size_t i=0; i& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + KNNSimpleResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + float epsError = 1+get_param(searchParams,"eps",0.0f); + + std::vector dists(dim_,0); + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node_, distsq, dists, epsError); + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Indices of points in leaf node + */ + int left, right; + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divlow, divhigh; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + + + struct Interval + { + DistanceType low, high; + }; + + typedef std::vector BoundingBox; + + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + void computeBoundingBox(BoundingBox& bbox) + { + bbox.resize(dim_); + for (size_t i=0; ibbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i]; + } + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. 
+ * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int left, int right, BoundingBox& bbox) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( (right-left) <= leaf_max_size_) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. */ + node->left = left; + node->right = right; + + // compute bounding-box of leaf points + for (size_t i=0; idataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i]; + if (bbox[i].highdivfeat = cutfeat; + + BoundingBox left_bbox(bbox); + left_bbox[cutfeat].high = cutval; + node->child1 = divideTree(left, left+idx, left_bbox); + + BoundingBox right_bbox(bbox); + right_bbox[cutfeat].low = cutval; + node->child2 = divideTree(left+idx, right, right_bbox); + + node->divlow = left_bbox[cutfeat].high; + node->divhigh = right_bbox[cutfeat].low; + + for (size_t i=0; imax_elem) max_elem = val; + } + } + + void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox) + { + // find the largest span from the approximate bounding box + ElementType max_span = bbox[0].high-bbox[0].low; + cutfeat = 0; + cutval = (bbox[0].high+bbox[0].low)/2; + for (size_t i=1; imax_span) { + max_span = span; + cutfeat = i; + cutval = (bbox[i].high+bbox[i].low)/2; + } + } + + // compute exact span on the found dimension + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + cutval = (min_elem+max_elem)/2; + max_span = max_elem - min_elem; + + // check if a dimension of a largest span exists + size_t k = cutfeat; + for (size_t i=0; imax_span) { + computeMinMax(ind, count, i, min_elem, max_elem); + span = max_elem - min_elem; + if (span>max_span) { + max_span = span; + cutfeat = i; + cutval = (min_elem+max_elem)/2; + } + } + } + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, 
lim2); + + if (lim1>count/2) index = lim1; + else if (lim2max_span) { + max_span = span; + } + } + DistanceType max_spread = -1; + cutfeat = 0; + for (size_t i=0; i(DistanceType)((1-EPS)*max_span)) { + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + DistanceType spread = (DistanceType)(max_elem-min_elem); + if (spread>max_spread) { + cutfeat = (int)i; + max_spread = spread; + } + } + } + // split in the middle + DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2; + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + + if (split_valmax_elem) cutval = (DistanceType)max_elem; + else cutval = split_val; + + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + /* If either list is empty, it means that all remaining features + * are identical. Split in the middle to maintain a balanced tree. 
+ */ + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + DistanceType computeInitialDistances(const ElementType* vec, std::vector& dists) + { + DistanceType distsq = 0.0; + + for (size_t i = 0; i < dim_; ++i) { + if (vec[i] < root_bbox_[i].low) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i); + distsq += dists[i]; + } + if (vec[i] > root_bbox_[i].high) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i); + distsq += dists[i]; + } + } + + return distsq; + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, + std::vector& dists, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + DistanceType worst_dist = result_set.worstDist(); + for (int i=node->left; iright; ++i) { + int index = reorder_ ? i : vind_[i]; + DistanceType dist = distance_(vec, data_[index], dim_, worst_dist); + if (distdivfeat; + ElementType val = vec[idx]; + DistanceType diff1 = val - node->divlow; + DistanceType diff2 = val - node->divhigh; + + NodePtr bestChild; + NodePtr otherChild; + DistanceType cut_dist; + if ((diff1+diff2)<0) { + bestChild = node->child1; + otherChild = node->child2; + cut_dist = distance_.accum_dist(val, node->divhigh, idx); + } + else { + bestChild = node->child2; + otherChild = node->child1; + cut_dist = distance_.accum_dist( val, node->divlow, idx); + } + + /* Call recursively to search next level down. 
*/ + searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); + + DistanceType dst = dists[idx]; + mindistsq = mindistsq + cut_dist - dst; + dists[idx] = cut_dist; + if (mindistsq*epsError<=result_set.worstDist()) { + searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); + } + dists[idx] = dst; + } + +private: + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + int leaf_max_size_; + bool reorder_; + + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + Matrix data_; + + size_t size_; + size_t dim_; + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr root_node_; + + BoundingBox root_bbox_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; +}; // class KDTree + +} + +#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ diff --git a/OpenCV/Headers/flann/kmeans_index.h b/OpenCV/Headers/flann/kmeans_index.h new file mode 100644 index 0000000000..3fea956a74 --- /dev/null +++ b/OpenCV/Headers/flann/kmeans_index.h @@ -0,0 +1,1114 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KMEANS_INDEX_H_ +#define OPENCV_FLANN_KMEANS_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" +#include "logger.h" + + +namespace cvflann +{ + +struct KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. 
Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * Hierarchical kmeans index + * + * Contains a tree constructed through a hierarchical kmeans clustering + * and other information for indexing a set of points for nearest-neighbour matching. + */ +template +class KMeansIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + + typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = indices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = indices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. 
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = indices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = -1; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) newPot += std::min( distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols), closestDistSq[i] ); + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = indices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols), closestDistSq[i] ); + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + + +public: + + flann_algorithm_t getType() const + { + return 
FLANN_INDEX_KMEANS; + } + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + KMeansIndex(const Matrix& inputData, const IndexParams& params = KMeansIndexParams(), + Distance d = Distance()) + : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d) + { + memoryCounter_ = 0; + + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + branching_ = get_param(params,"branching",32); + iterations_ = get_param(params,"iterations",11); + if (iterations_<0) { + iterations_ = (std::numeric_limits::max)(); + } + centers_init_ = get_param(params,"centers_init",FLANN_CENTERS_RANDOM); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &KMeansIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &KMeansIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &KMeansIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + cb_index_ = 0.4f; + + } + + + KMeansIndex(const KMeansIndex&); + KMeansIndex& operator=(const KMeansIndex&); + + + /** + * Index destructor. + * + * Release the memory used by the index. + */ + virtual ~KMeansIndex() + { + if (root_ != NULL) { + free_centers(root_); + } + if (indices_!=NULL) { + delete[] indices_; + } + } + + /** + * Returns size of index. + */ + size_t size() const + { + return size_; + } + + /** + * Returns the length of an index feature. 
+ */ + size_t veclen() const + { + return veclen_; + } + + + void set_cb_index( float index) + { + cb_index_ = index; + } + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return pool_.usedMemory+pool_.wastedMemory+memoryCounter_; + } + + /** + * Builds the index + */ + void buildIndex() + { + if (branching_<2) { + throw FLANNException("Branching factor must be at least 2"); + } + + indices_ = new int[size_]; + for (size_t i=0; i(); + computeNodeStatistics(root_, indices_, (int)size_); + computeClustering(root_, indices_, (int)size_, branching_,0); + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, iterations_); + save_value(stream, memoryCounter_); + save_value(stream, cb_index_); + save_value(stream, *indices_, (int)size_); + + save_tree(stream, root_); + } + + + void loadIndex(FILE* stream) + { + load_value(stream, branching_); + load_value(stream, iterations_); + load_value(stream, memoryCounter_); + load_value(stream, cb_index_); + if (indices_!=NULL) { + delete[] indices_; + } + indices_ = new int[size_]; + load_value(stream, *indices_, size_); + + if (root_!=NULL) { + free_centers(root_); + } + load_tree(stream, root_); + + index_params_["algorithm"] = getType(); + index_params_["branching"] = branching_; + index_params_["iterations"] = iterations_; + index_params_["centers_init"] = centers_init_; + index_params_["cb_index"] = cb_index_; + + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * searchParams = parameters that influence the search algorithm (checks, cb_index) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + findExactNN(root_, result, vec); + } + else { + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + int checks = 0; + findNN(root_, result, vec, checks, maxChecks, heap); + + BranchSt branch; + while (heap->popMin(branch) && (checks& centers) + { + int numClusters = centers.rows; + if (numClusters<1) { + throw FLANNException("Number of clusters must be at least 1"); + } + + DistanceType variance; + KMeansNodePtr* clusters = new KMeansNodePtr[numClusters]; + + int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance); + + Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount); + + for (int i=0; ipivot; + for (size_t j=0; j BranchSt; + + + + + void save_tree(FILE* stream, KMeansNodePtr node) + { + save_value(stream, *node); + save_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices_); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i]); + } + } + } + + + void load_tree(FILE* stream, KMeansNodePtr& node) + { + node = pool_.allocate(); + load_value(stream, *node); + node->pivot = new DistanceType[veclen_]; + load_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices_ + indices_offset; + } + else { + node->childs = pool_.allocate(branching_); + for(int i=0; ichilds[i]); + } + } + } + + + /** + * 
Helper function + */ + void free_centers(KMeansNodePtr node) + { + delete[] node->pivot; + if (node->childs!=NULL) { + for (int k=0; kchilds[k]); + } + } + } + + /** + * Computes the statistics of a node (mean, radius, variance). + * + * Params: + * node = the node to use + * indices = the indices of the points belonging to the node + */ + void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length) + { + + DistanceType radius = 0; + DistanceType variance = 0; + DistanceType* mean = new DistanceType[veclen_]; + memoryCounter_ += int(veclen_*sizeof(DistanceType)); + + memset(mean,0,veclen_*sizeof(DistanceType)); + + for (size_t i=0; i(), veclen_); + } + for (size_t j=0; j(), veclen_); + + DistanceType tmp = 0; + for (int i=0; iradius) { + radius = tmp; + } + } + + node->variance = variance; + node->radius = radius; + node->pivot = mean; + } + + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < branching) { + node->indices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + int* centers_idx = new int[branching]; + int centers_length; + (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length); + + if (centers_lengthindices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + delete [] centers_idx; + return; + } + + + Matrix dcenters(new double[branching*veclen_],branching,veclen_); + for (int i=0; i 
radiuses(branching); + int* count = new int[branching]; + for (int i=0; inew_sq_dist) { + belongs_to[i] = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[belongs_to[i]]) { + radiuses[belongs_to[i]] = sq_dist; + } + count[belongs_to[i]]++; + } + + bool converged = false; + int iteration = 0; + while (!converged && iterationnew_sq_dist) { + new_centroid = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[new_centroid]) { + radiuses[new_centroid] = sq_dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + + converged = false; + } + } + + for (int i=0; ichilds = pool_.allocate(branching); + int start = 0; + int end = start; + for (int c=0; c(), veclen_); + variance += d; + mean_radius += sqrt(d); + std::swap(indices[i],indices[end]); + std::swap(belongs_to[i],belongs_to[end]); + end++; + } + } + variance /= s; + mean_radius /= s; + variance -= distance_(centers[c], ZeroIterator(), veclen_); + + node->childs[c] = pool_.allocate(); + node->childs[c]->radius = radiuses[c]; + node->childs[c]->pivot = centers[c]; + node->childs[c]->variance = variance; + node->childs[c]->mean_radius = mean_radius; + node->childs[c]->indices = NULL; + computeClustering(node->childs[c],indices+start, end-start, branching, level+1); + start=end; + } + + delete[] dcenters.data; + delete[] centers; + delete[] count; + delete[] belongs_to; + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + //if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + checks += node->size; + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int closest_center = exploreNodeBranches(node, vec, domain_distances, heap); + delete[] domain_distances; + findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap); + } + } + + /** + * Helper function that computes the nearest childs of a node to a given query point. + * Params: + * node = the node + * q = the query point + * distances = array with the distances to each child node. 
+ * Returns: + */ + int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap* heap) + { + + int best_index = 0; + domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_); + for (int i=1; ichilds[i]->pivot, veclen_); + if (domain_distances[i]childs[best_index]->pivot; + for (int i=0; ichilds[i]->variance; + + // float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q); + // if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + + return best_index; + } + + + /** + * Function the performs exact nearest neighbor search by traversing the entire tree. + */ + void findExactNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + // if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + + if (node->childs==NULL) { + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + int* sort_indices = new int[branching_]; + + getCenterOrdering(node, vec, sort_indices); + + for (int i=0; ichilds[sort_indices[i]],result,vec); + } + + delete[] sort_indices; + } + } + + + /** + * Helper function. + * + * I computes the order in which to traverse the child nodes of a particular node. 
+ */ + void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices) + { + DistanceType* domain_distances = new DistanceType[branching_]; + for (int i=0; ichilds[i]->pivot, veclen_); + + int j=0; + while (domain_distances[j]j; --k) { + domain_distances[k] = domain_distances[k-1]; + sort_indices[k] = sort_indices[k-1]; + } + domain_distances[j] = dist; + sort_indices[j] = i; + } + delete[] domain_distances; + } + + /** + * Method that computes the squared distance from the query point q + * from inside region with center c to the border between this + * region and the region with center p + */ + DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q) + { + DistanceType sum = 0; + DistanceType sum2 = 0; + + for (int i=0; ivariance*root->size; + + while (clusterCount::max)(); + int splitIndex = -1; + + for (int i=0; ichilds != NULL) { + + DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size; + + for (int j=0; jchilds[j]->variance*clusters[i]->childs[j]->size; + } + if (variance clusters_length) break; + + meanVariance = minVariance; + + // split node + KMeansNodePtr toSplit = clusters[splitIndex]; + clusters[splitIndex] = toSplit->childs[0]; + for (int i=1; ichilds[i]; + } + } + + varianceValue = meanVariance/root->size; + return clusterCount; + } + +private: + /** The branching factor used in the hierarchical k-means clustering */ + int branching_; + + /** Maximum number of iterations to use when performing k-means clustering */ + int iterations_; + + /** Algorithm for choosing the cluster centers */ + flann_centers_init_t centers_init_; + + /** + * Cluster border index. This is used in the tree search phase when determining + * the closest cluster to explore next. A zero value takes into account only + * the cluster centres, a value greater then zero also take into account the size + * of the cluster. 
+ */ + float cb_index_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** Index parameters */ + IndexParams index_params_; + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + KMeansNodePtr root_; + + /** + * Array of indices to vectors in the dataset. + */ + int* indices_; + + /** + * The distance + */ + Distance distance_; + + /** + * Pooled memory allocator. + */ + PooledAllocator pool_; + + /** + * Memory occupied by the index. + */ + int memoryCounter_; +}; + +} + +#endif //OPENCV_FLANN_KMEANS_INDEX_H_ diff --git a/OpenCV/Headers/flann/linear_index.h b/OpenCV/Headers/flann/linear_index.h new file mode 100644 index 0000000000..5aa7a5cfac --- /dev/null +++ b/OpenCV/Headers/flann/linear_index.h @@ -0,0 +1,132 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LINEAR_INDEX_H_ +#define OPENCV_FLANN_LINEAR_INDEX_H_ + +#include "general.h" +#include "nn_index.h" + +namespace cvflann +{ + +struct LinearIndexParams : public IndexParams +{ + LinearIndexParams() + { + (* this)["algorithm"] = FLANN_INDEX_LINEAR; + } +}; + +template +class LinearIndex : public NNIndex +{ +public: + + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + LinearIndex(const Matrix& inputData, const IndexParams& params = LinearIndexParams(), + Distance d = Distance()) : + dataset_(inputData), index_params_(params), distance_(d) + { + } + + LinearIndex(const LinearIndex&); + LinearIndex& operator=(const LinearIndex&); + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LINEAR; + } + + + size_t size() const + { + return dataset_.rows; + } + + size_t veclen() const + { + return dataset_.cols; + } + + + int usedMemory() const + { + return 0; + } + + void buildIndex() + { + /* nothing to do here for linear search */ + } + + void saveIndex(FILE*) + { + /* nothing to do here for linear search */ + } + + + void loadIndex(FILE*) + { + /* nothing to do here for linear search */ + + index_params_["algorithm"] = getType(); + } + + void findNeighbors(ResultSet& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) + { + ElementType* data = dataset_.data; + for (size_t i 
= 0; i < dataset_.rows; ++i, data += dataset_.cols) { + DistanceType dist = distance_(data, vec, dataset_.cols); + resultSet.addPoint(dist, (int)i); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + /** The dataset */ + const Matrix dataset_; + /** Index parameters */ + IndexParams index_params_; + /** Index distance */ + Distance distance_; + +}; + +} + +#endif // OPENCV_FLANN_LINEAR_INDEX_H_ diff --git a/OpenCV/Headers/flann/logger.h b/OpenCV/Headers/flann/logger.h new file mode 100644 index 0000000000..24f3fb6988 --- /dev/null +++ b/OpenCV/Headers/flann/logger.h @@ -0,0 +1,130 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LOGGER_H +#define OPENCV_FLANN_LOGGER_H + +#include +#include + +#include "defines.h" + + +namespace cvflann +{ + +class Logger +{ + Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {} + + ~Logger() + { + if ((stream!=NULL)&&(stream!=stdout)) { + fclose(stream); + } + } + + static Logger& instance() + { + static Logger logger; + return logger; + } + + void _setDestination(const char* name) + { + if (name==NULL) { + stream = stdout; + } + else { + stream = fopen(name,"w"); + if (stream == NULL) { + stream = stdout; + } + } + } + + int _log(int level, const char* fmt, va_list arglist) + { + if (level > logLevel ) return -1; + int ret = vfprintf(stream, fmt, arglist); + return ret; + } + +public: + /** + * Sets the logging level. All messages with lower priority will be ignored. + * @param level Logging level + */ + static void setLevel(int level) { instance().logLevel = level; } + + /** + * Sets the logging destination + * @param name Filename or NULL for console + */ + static void setDestination(const char* name) { instance()._setDestination(name); } + + /** + * Print log message + * @param level Log level + * @param fmt Message format + * @return + */ + static int log(int level, const char* fmt, ...) 
+ { + va_list arglist; + va_start(arglist, fmt); + int ret = instance()._log(level,fmt,arglist); + va_end(arglist); + return ret; + } + +#define LOG_METHOD(NAME,LEVEL) \ + static int NAME(const char* fmt, ...) \ + { \ + va_list ap; \ + va_start(ap, fmt); \ + int ret = instance()._log(LEVEL, fmt, ap); \ + va_end(ap); \ + return ret; \ + } + + LOG_METHOD(fatal, FLANN_LOG_FATAL) + LOG_METHOD(error, FLANN_LOG_ERROR) + LOG_METHOD(warn, FLANN_LOG_WARN) + LOG_METHOD(info, FLANN_LOG_INFO) + +private: + FILE* stream; + int logLevel; +}; + +} + +#endif //OPENCV_FLANN_LOGGER_H diff --git a/OpenCV/Headers/flann/lsh_index.h b/OpenCV/Headers/flann/lsh_index.h new file mode 100644 index 0000000000..4d4670ea50 --- /dev/null +++ b/OpenCV/Headers/flann/lsh_index.h @@ -0,0 +1,392 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_INDEX_H_ +#define OPENCV_FLANN_LSH_INDEX_H_ + +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "lsh_table.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct LshIndexParams : public IndexParams +{ + LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) + { + (* this)["algorithm"] = FLANN_INDEX_LSH; + // The number of hash tables to use + (*this)["table_number"] = table_number; + // The length of the key in the hash tables + (*this)["key_size"] = key_size; + // Number of levels to use in multi-probe (0 for standard LSH) + (*this)["multi_probe_level"] = multi_probe_level; + } +}; + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. 
+ */ +template +class LshIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** Constructor + * @param input_data dataset with the input features + * @param params parameters passed to the LSH algorithm + * @param d the distance used + */ + LshIndex(const Matrix& input_data, const IndexParams& params = LshIndexParams(), + Distance d = Distance()) : + dataset_(input_data), index_params_(params), distance_(d) + { + // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param + // in place of 'unsigned int' + table_number_ = (unsigned int)get_param(index_params_,"table_number",12); + key_size_ = (unsigned int)get_param(index_params_,"key_size",20); + multi_probe_level_ = (unsigned int)get_param(index_params_,"multi_probe_level",2); + + feature_size_ = (unsigned)dataset_.cols; + fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); + } + + + LshIndex(const LshIndex&); + LshIndex& operator=(const LshIndex&); + + /** + * Builds the index + */ + void buildIndex() + { + tables_.resize(table_number_); + for (unsigned int i = 0; i < table_number_; ++i) { + lsh::LshTable& table = tables_[i]; + table = lsh::LshTable(feature_size_, key_size_); + + // Add the features to the table + table.add(dataset_); + } + } + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LSH; + } + + + void saveIndex(FILE* stream) + { + save_value(stream,table_number_); + save_value(stream,key_size_); + save_value(stream,multi_probe_level_); + save_value(stream, dataset_); + } + + void loadIndex(FILE* stream) + { + load_value(stream, table_number_); + load_value(stream, key_size_); + load_value(stream, multi_probe_level_); + load_value(stream, dataset_); + // Building the index is so fast we can afford not storing it + buildIndex(); + + index_params_["algorithm"] = getType(); + index_params_["table_number"] = table_number_; + index_params_["key_size"] = key_size_; + 
index_params_["multi_probe_level"] = multi_probe_level_; + } + + /** + * Returns size of index. + */ + size_t size() const + { + return dataset_.rows; + } + + /** + * Returns the length of an index feature. + */ + size_t veclen() const + { + return feature_size_; + } + + /** + * Computes the index memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return (int)(dataset_.rows * sizeof(int)); + } + + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + std::fill_n(indices[i], knn, -1); + std::fill_n(dists[i], knn, std::numeric_limits::max()); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& /*searchParams*/) + { + getNeighbors(vec, result); + } + +private: + /** Defines the comparator on score and index + */ + typedef std::pair ScoreIndexPair; + struct SortScoreIndexPairOnSecond + { + bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const + { + return left.second < right.second; + } + }; + + /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH + * @param key the key we build neighbors from + * @param lowest_index the lowest index of the bit set + * @param level the multi-probe level we are at + * @param xor_masks all the xor mask + */ + void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, + std::vector& xor_masks) + { + xor_masks.push_back(key); + if (level == 0) return; + for (int index = lowest_index - 1; index >= 0; --index) { + // Create a new key + lsh::BucketKey new_key = key | (1 << index); + fill_xor_mask(new_key, index, level - 1, xor_masks); + } + } + + /** Performs the approximate nearest-neighbor search. 
+ * @param vec the feature to analyze + * @param do_radius flag indicating if we check the radius too + * @param radius the radius if it is a radius search + * @param do_k flag indicating if we limit the number of nn + * @param k_nn the number of nearest neighbors + * @param checked_average used for debugging + */ + void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn, + float& /*checked_average*/) + { + static std::vector score_index_heap; + + if (do_k) { + unsigned int worst_score = std::numeric_limits::max(); + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + + if (hamming_distance < worst_score) { + // Insert the new element + score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + std::push_heap(score_index_heap.begin(), score_index_heap.end()); + + if (score_index_heap.size() > (unsigned int)k_nn) { + // Remove the highest distance value as we have too many elements + std::pop_heap(score_index_heap.begin(), score_index_heap.end()); + score_index_heap.pop_back(); + // Keep track of the worst score + worst_score = score_index_heap.front().first; + } 
+ } + } + } + } + } + else { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + } + } + } + } + } + + /** Performs the approximate nearest-neighbor search. 
+ * This is a slower version than the above as it uses the ResultSet + * @param vec the feature to analyze + */ + void getNeighbors(const ElementType* vec, ResultSet& result) + { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols); + result.addPoint(hamming_distance, *training_index); + } + } + } + } + + /** The different hash tables */ + std::vector > tables_; + + /** The data the LSH tables where built from */ + Matrix dataset_; + + /** The size of the features (as ElementType[]) */ + unsigned int feature_size_; + + IndexParams index_params_; + + /** table number */ + unsigned int table_number_; + /** key size */ + unsigned int key_size_; + /** How far should we look for neighbors in multi-probe LSH */ + unsigned int multi_probe_level_; + + /** The XOR masks to apply to a key to get the neighboring buckets */ + std::vector xor_masks_; + + Distance distance_; +}; +} + +#endif //OPENCV_FLANN_LSH_INDEX_H_ diff --git a/OpenCV/Headers/flann/lsh_table.h b/OpenCV/Headers/flann/lsh_table.h new file mode 100644 index 0000000000..126fb2a992 --- /dev/null +++ 
b/OpenCV/Headers/flann/lsh_table.h @@ -0,0 +1,482 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_TABLE_H_ +#define OPENCV_FLANN_LSH_TABLE_H_ + +#include +#include +#include +#include +// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP +#ifdef __GXX_EXPERIMENTAL_CXX0X__ +# define USE_UNORDERED_MAP 1 +#else +# define USE_UNORDERED_MAP 0 +#endif +#if USE_UNORDERED_MAP +#include +#else +#include +#endif +#include +#include + +#include "dynamic_bitset.h" +#include "matrix.h" + +namespace cvflann +{ + +namespace lsh +{ + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** What is stored in an LSH bucket + */ +typedef uint32_t FeatureIndex; +/** The id from which we can get a bucket back in an LSH table + */ +typedef unsigned int BucketKey; + +/** A bucket in an LSH table + */ +typedef std::vector Bucket; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** POD for stats about an LSH table + */ +struct LshStats +{ + std::vector bucket_sizes_; + size_t n_buckets_; + size_t bucket_size_mean_; + size_t bucket_size_median_; + size_t bucket_size_min_; + size_t bucket_size_max_; + size_t bucket_size_std_dev; + /** Each contained vector contains three value: beginning/end for interval, number of elements in the bin + */ + std::vector > size_histogram_; +}; + +/** Overload the << operator for LshStats + * @param out the streams + * @param stats the stats to display + * @return the streams + */ +inline std::ostream& operator <<(std::ostream& out, const LshStats& stats) +{ + int w = 20; + out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : " + << stats.n_buckets_ << "\n" << 
std::setw(w) << std::setiosflags(std::ios::right) << "mean size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left) + << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_max_; + + // Display the histogram + out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : " + << std::setiosflags(std::ios::left); + for (std::vector >::const_iterator iterator = stats.size_histogram_.begin(), end = + stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ", "; + + return out; +} + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Lsh hash table. As its key is a sub-feature, and as usually + * the size of it is pretty small, we keep it as a continuous memory array. + * The value is an index in the corpus of features (we keep it as an unsigned + * int for pure memory reasons, it could be a size_t) + */ +template +class LshTable +{ +public: + /** A container of all the feature indices. Optimized for space + */ +#if USE_UNORDERED_MAP + typedef std::unordered_map BucketsSpace; +#else + typedef std::map BucketsSpace; +#endif + + /** A container of all the feature indices. 
Optimized for speed + */ + typedef std::vector BucketsSpeed; + + /** Default constructor + */ + LshTable() + { + } + + /** Default constructor + * Create the mask and allocate the memory + * @param feature_size is the size of the feature (considered as a ElementType[]) + * @param key_size is the number of bits that are turned on in the feature + */ + LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/) + { + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + } + + /** Add a feature to the table + * @param value the value to store for that feature + * @param feature the feature itself + */ + void add(unsigned int value, const ElementType* feature) + { + // Add the value to the corresponding bucket + BucketKey key = (lsh::BucketKey)getKey(feature); + + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + buckets_speed_[key].push_back(value); + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + key_bitset_.set(key); + buckets_space_[key].push_back(value); + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + buckets_space_[key].push_back(value); + break; + } + } + } + + /** Add a set of features to the table + * @param dataset the values to store + */ + void add(Matrix dataset) + { +#if USE_UNORDERED_MAP + buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2); +#endif + // Add the features to the table + for (unsigned int i = 0; i < dataset.rows; ++i) add(i, dataset[i]); + // Now that the table is full, optimize it for speed/space + optimize(); + } + + /** Get a bucket given the key + * @param key + * @return + */ + inline const Bucket* getBucketFromKey(BucketKey key) const + { + // Generate other buckets + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + return &buckets_speed_[key]; + break; + case kBitsetHash: + // That means we can 
check the bitset for the presence of a key + if (key_bitset_.test(key)) return &buckets_space_.find(key)->second; + else return 0; + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end(); + bucket_it = buckets_space_.find(key); + // Stop here if that bucket does not exist + if (bucket_it == bucket_end) return 0; + else return &bucket_it->second; + break; + } + } + return 0; + } + + /** Compute the sub-signature of a feature + */ + size_t getKey(const ElementType* /*feature*/) const + { + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + return 1; + } + + /** Get statistics about the table + * @return + */ + LshStats getStats() const; + +private: + /** defines the speed fo the implementation + * kArray uses a vector for storing data + * kBitsetHash uses a hash map but checks for the validity of a key with a bitset + * kHash uses a hash map only + */ + enum SpeedLevel + { + kArray, kBitsetHash, kHash + }; + + /** Initialize some variables + */ + void initialize(size_t key_size) + { + speed_level_ = kHash; + key_size_ = (unsigned)key_size; + } + + /** Optimize the table for speed/space + */ + void optimize() + { + // If we are already using the fast storage, no need to do anything + if (speed_level_ == kArray) return; + + // Use an array if it will be more than half full + if (buckets_space_.size() > (unsigned int)((1 << key_size_) / 2)) { + speed_level_ = kArray; + // Fill the array version of it + buckets_speed_.resize(1 << key_size_); + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second; + + // Empty the hash table + buckets_space_.clear(); + return; + } + + // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two + // for the vector) 
or less than 512MB (key_size_ <= 30) + if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10 + >= size_t(1 << key_size_)) || (key_size_ <= 32)) { + speed_level_ = kBitsetHash; + key_bitset_.resize(1 << key_size_); + key_bitset_.reset(); + // Try with the BucketsSpace + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first); + } + else { + speed_level_ = kHash; + key_bitset_.clear(); + } + } + + /** The vector of all the buckets if they are held for speed + */ + BucketsSpeed buckets_speed_; + + /** The hash table of all the buckets in case we cannot use the speed version + */ + BucketsSpace buckets_space_; + + /** What is used to store the data */ + SpeedLevel speed_level_; + + /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset + * That is just a speedup so that we don't look in the hash table (which can be mush slower that checking a bitset) + */ + DynamicBitset key_bitset_; + + /** The size of the sub-signature in bits + */ + unsigned int key_size_; + + // Members only used for the unsigned char specialization + /** The mask to apply to a feature to get the hash key + * Only used in the unsigned char case + */ + std::vector mask_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Specialization for unsigned char + +template<> +inline LshTable::LshTable(unsigned int feature_size, unsigned int subsignature_size) +{ + initialize(subsignature_size); + // Allocate the mask + mask_ = std::vector((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0); + + // A bit brutal but fast to code + std::vector indices(feature_size * CHAR_BIT); + for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = i; + std::random_shuffle(indices.begin(), indices.end()); + + // Generate 
a random set of order of subsignature_size_ bits + for (unsigned int i = 0; i < key_size_; ++i) { + size_t index = indices[i]; + + // Set that bit in the mask + size_t divisor = CHAR_BIT * sizeof(size_t); + size_t idx = index / divisor; //pick the right size_t index + mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset + } + + // Set to 1 if you want to display the mask for debug +#if 0 + { + size_t bcount = 0; + BOOST_FOREACH(size_t mask_block, mask_){ + out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block + << std::endl; + bcount += __builtin_popcountll(mask_block); + } + out << "bit count : " << std::dec << bcount << std::endl; + out << "mask size : " << mask_.size() << std::endl; + return out; + } +#endif +} + +/** Return the Subsignature of a feature + * @param feature the feature to analyze + */ +template<> +inline size_t LshTable::getKey(const unsigned char* feature) const +{ + // no need to check if T is dividable by sizeof(size_t) like in the Hamming + // distance computation as we have a mask + const size_t* feature_block_ptr = reinterpret_cast (feature); + + // Figure out the subsignature of the feature + // Given the feature ABCDEF, and the mask 001011, the output will be + // 000CEF + size_t subsignature = 0; + size_t bit_index = 1; + + for (std::vector::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) { + // get the mask and signature blocks + size_t feature_block = *feature_block_ptr; + size_t mask_block = *pmask_block; + while (mask_block) { + // Get the lowest set bit in the mask block + size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block); + // Add it to the current subsignature if necessary + subsignature += (feature_block & lowest_bit) ? 
bit_index : 0; + // Reset the bit in the mask block + mask_block ^= lowest_bit; + // increment the bit index for the subsignature + bit_index <<= 1; + } + // Check the next feature block + ++feature_block_ptr; + } + return subsignature; +} + +template<> +inline LshStats LshTable::getStats() const +{ + LshStats stats; + stats.bucket_size_mean_ = 0; + if ((buckets_speed_.empty()) && (buckets_space_.empty())) { + stats.n_buckets_ = 0; + stats.bucket_size_median_ = 0; + stats.bucket_size_min_ = 0; + stats.bucket_size_max_ = 0; + return stats; + } + + if (!buckets_speed_.empty()) { + for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size()); + stats.bucket_size_mean_ += pbucket->size(); + } + stats.bucket_size_mean_ /= buckets_speed_.size(); + stats.n_buckets_ = buckets_speed_.size(); + } + else { + for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size()); + stats.bucket_size_mean_ += x->second.size(); + } + stats.bucket_size_mean_ /= buckets_space_.size(); + stats.n_buckets_ = buckets_space_.size(); + } + + std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end()); + + // BOOST_FOREACH(int size, stats.bucket_sizes_) + // std::cout << size << " "; + // std::cout << std::endl; + stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2]; + stats.bucket_size_min_ = stats.bucket_sizes_.front(); + stats.bucket_size_max_ = stats.bucket_sizes_.back(); + + // TODO compute mean and std + /*float mean, stddev; + stats.bucket_size_mean_ = mean; + stats.bucket_size_std_dev = stddev;*/ + + // Include a histogram of the buckets + unsigned int bin_start = 0; + unsigned int bin_end = 20; + bool is_new_bin = true; + for (std::vector::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator + != 
end; ) + if (*iterator < bin_end) { + if (is_new_bin) { + stats.size_histogram_.push_back(std::vector(3, 0)); + stats.size_histogram_.back()[0] = bin_start; + stats.size_histogram_.back()[1] = bin_end - 1; + is_new_bin = false; + } + ++stats.size_histogram_.back()[2]; + ++iterator; + } + else { + bin_start += 20; + bin_end += 20; + is_new_bin = true; + } + + return stats; +} + +// End the two namespaces +} +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#endif /* OPENCV_FLANN_LSH_TABLE_H_ */ diff --git a/OpenCV/Headers/flann/matrix.h b/OpenCV/Headers/flann/matrix.h new file mode 100644 index 0000000000..51b6c6352c --- /dev/null +++ b/OpenCV/Headers/flann/matrix.h @@ -0,0 +1,116 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DATASET_H_ +#define OPENCV_FLANN_DATASET_H_ + +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Class that implements a simple rectangular matrix stored in a memory buffer and + * provides convenient matrix-like access using the [] operators. + */ +template +class Matrix +{ +public: + typedef T type; + + size_t rows; + size_t cols; + size_t stride; + T* data; + + Matrix() : rows(0), cols(0), stride(0), data(NULL) + { + } + + Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) : + rows(rows_), cols(cols_), stride(stride_), data(data_) + { + if (stride==0) stride = cols; + } + + /** + * Convenience function for deallocating the storage data. + */ + FLANN_DEPRECATED void free() + { + fprintf(stderr, "The cvflann::Matrix::free() method is deprecated " + "and it does not do any memory deallocation any more. You are" + "responsible for deallocating the matrix memory (by doing" + "'delete[] matrix.data' for example)"); + } + + /** + * Operator that return a (pointer to a) row of the data. 
+ */ + T* operator[](size_t index) const + { + return data+index*stride; + } +}; + + +class UntypedMatrix +{ +public: + size_t rows; + size_t cols; + void* data; + flann_datatype_t type; + + UntypedMatrix(void* data_, long rows_, long cols_) : + rows(rows_), cols(cols_), data(data_) + { + } + + ~UntypedMatrix() + { + } + + + template + Matrix as() + { + return Matrix((T*)data, rows, cols); + } +}; + + + +} + +#endif //OPENCV_FLANN_DATASET_H_ diff --git a/OpenCV/Headers/flann/miniflann.hpp b/OpenCV/Headers/flann/miniflann.hpp new file mode 100644 index 0000000000..18c908141d --- /dev/null +++ b/OpenCV/Headers/flann/miniflann.hpp @@ -0,0 +1,162 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _OPENCV_MINIFLANN_HPP_ +#define _OPENCV_MINIFLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/defines.h" + +namespace cv +{ + +namespace flann +{ + +struct CV_EXPORTS IndexParams +{ + IndexParams(); + ~IndexParams(); + + std::string getString(const std::string& key, const std::string& defaultVal=std::string()) const; + int getInt(const std::string& key, int defaultVal=-1) const; + double getDouble(const std::string& key, double defaultVal=-1) const; + + void setString(const std::string& key, const std::string& value); + void setInt(const std::string& key, int value); + void setDouble(const std::string& key, double value); + void setFloat(const std::string& key, float value); + void setBool(const std::string& key, bool value); + void setAlgorithm(int value); + + void getAll(std::vector& names, + std::vector& types, + std::vector& strValues, + std::vector& numValues) const; + + void* params; +}; + +struct CV_EXPORTS KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees=4); +}; + +struct CV_EXPORTS LinearIndexParams : public IndexParams +{ + LinearIndexParams(); +}; + +struct CV_EXPORTS 
CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); +}; + +struct CV_EXPORTS AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, + float memory_weight = 0, float sample_fraction = 0.1); +}; + +struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 ); +}; + +struct CV_EXPORTS KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); +}; + +struct CV_EXPORTS LshIndexParams : public IndexParams +{ + LshIndexParams(int table_number, int key_size, int multi_probe_level); +}; + +struct CV_EXPORTS SavedIndexParams : public IndexParams +{ + SavedIndexParams(const std::string& filename); +}; + +struct CV_EXPORTS SearchParams : public IndexParams +{ + SearchParams( int checks = 32, float eps = 0, bool sorted = true ); +}; + +class CV_EXPORTS_W Index +{ +public: + CV_WRAP Index(); + CV_WRAP Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + virtual ~Index(); + + CV_WRAP virtual void build(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + CV_WRAP virtual void knnSearch(InputArray query, OutputArray indices, + OutputArray dists, int knn, const SearchParams& params=SearchParams()); + + CV_WRAP virtual int radiusSearch(InputArray query, OutputArray indices, + OutputArray dists, double radius, int maxResults, + const SearchParams& params=SearchParams()); + + CV_WRAP virtual void 
save(const std::string& filename) const; + CV_WRAP virtual bool load(InputArray features, const std::string& filename); + CV_WRAP virtual void release(); + CV_WRAP cvflann::flann_distance_t getDistance() const; + CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const; + +protected: + cvflann::flann_distance_t distType; + cvflann::flann_algorithm_t algo; + int featureType; + void* index; +}; + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/OpenCV/Headers/flann/nn_index.h b/OpenCV/Headers/flann/nn_index.h new file mode 100644 index 0000000000..d14e83a92c --- /dev/null +++ b/OpenCV/Headers/flann/nn_index.h @@ -0,0 +1,179 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_NNINDEX_H +#define OPENCV_FLANN_NNINDEX_H + +#include + +#include "general.h" +#include "matrix.h" +#include "result_set.h" +#include "params.h" + +namespace cvflann +{ + +/** + * Nearest-neighbour index base class + */ +template +class NNIndex +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +public: + + virtual ~NNIndex() {} + + /** + * \brief Builds the index + */ + virtual void buildIndex() = 0; + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + +#if 0 + KNNResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } +#else + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i 
< queries.rows; i++) { + resultSet.clear(); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } +#endif + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + virtual int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + if (query.rows != 1) { + fprintf(stderr, "I can only search one feature at a time for range search\n"); + return -1; + } + assert(query.cols == veclen()); + assert(indices.cols == dists.cols); + + int n = 0; + int* indices_ptr = NULL; + DistanceType* dists_ptr = NULL; + if (indices.cols > 0) { + n = (int)indices.cols; + indices_ptr = indices[0]; + dists_ptr = dists[0]; + } + + RadiusUniqueResultSet resultSet((DistanceType)radius); + resultSet.clear(); + findNeighbors(resultSet, query[0], params); + if (n>0) { + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n); + else resultSet.copy(indices_ptr, dists_ptr, n); + } + + return (int)resultSet.size(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) = 0; + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) = 0; + + /** + * \returns number of features in this index. + */ + virtual size_t size() const = 0; + + /** + * \returns The dimensionality of the features in this index. 
+ */ + virtual size_t veclen() const = 0; + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + virtual int usedMemory() const = 0; + + /** + * \returns The index type (kdtree, kmeans,...) + */ + virtual flann_algorithm_t getType() const = 0; + + /** + * \returns The index parameters + */ + virtual IndexParams getParameters() const = 0; + + + /** + * \brief Method that searches for nearest-neighbours + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) = 0; +}; + +} + +#endif //OPENCV_FLANN_NNINDEX_H diff --git a/OpenCV/Headers/flann/object_factory.h b/OpenCV/Headers/flann/object_factory.h new file mode 100644 index 0000000000..7f971c5a31 --- /dev/null +++ b/OpenCV/Headers/flann/object_factory.h @@ -0,0 +1,91 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_
+#define OPENCV_FLANN_OBJECT_FACTORY_H_
+
+#include <map>
+
+namespace cvflann
+{
+
+class CreatorNotFound
+{
+};
+
+template<typename BaseClass,
+         typename UniqueIdType,
+         typename ObjectCreator = BaseClass* (*)()>
+class ObjectFactory
+{
+    typedef ObjectFactory<BaseClass, UniqueIdType, ObjectCreator> ThisClass;
+    typedef std::map<UniqueIdType, ObjectCreator> ObjectRegistry;
+
+    // singleton class, private constructor
+    ObjectFactory() {}
+
+public:
+
+    bool subscribe(UniqueIdType id, ObjectCreator creator)
+    {
+        if (object_registry.find(id) != object_registry.end()) return false;
+
+        object_registry[id] = creator;
+        return true;
+    }
+
+    bool unregister(UniqueIdType id)
+    {
+        return object_registry.erase(id) == 1;
+    }
+
+    ObjectCreator create(UniqueIdType id)
+    {
+        typename ObjectRegistry::const_iterator iter = object_registry.find(id);
+
+        if (iter == object_registry.end()) {
+            throw CreatorNotFound();
+        }
+
+        return iter->second;
+    }
+
+    static ThisClass& instance()
+    {
+        static ThisClass the_factory;
+        return the_factory;
+    }
+private:
+    ObjectRegistry object_registry;
+};
+
+}
+
+#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */
diff --git a/OpenCV/Headers/flann/params.h b/OpenCV/Headers/flann/params.h
new file mode 100644
index 0000000000..fc2a906198
--- /dev/null
+++ b/OpenCV/Headers/flann/params.h
@@ -0,0 +1,96 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca).
All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_PARAMS_H_
+#define OPENCV_FLANN_PARAMS_H_
+
+#include "any.h"
+#include "general.h"
+#include <iostream>
+#include <map>
+
+
+namespace cvflann
+{
+
+typedef std::map<std::string, any> IndexParams;
+
+struct SearchParams : public IndexParams
+{
+    SearchParams(int checks = 32, float eps = 0, bool sorted = true )
+    {
+        // how many leafs to visit when searching for neighbours (-1 for unlimited)
+        (*this)["checks"] = checks;
+        // search for eps-approximate neighbours (default: 0)
+        (*this)["eps"] = eps;
+        // only for radius search, require neighbours sorted by distance (default: true)
+        (*this)["sorted"] = sorted;
+    }
+};
+
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name, const T& default_value)
+{
+    IndexParams::const_iterator it = params.find(name);
+    if (it != params.end()) {
+        return it->second.cast<T>();
+    }
+    else {
+        return default_value;
+    }
+}
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name)
+{
+    IndexParams::const_iterator it = params.find(name);
+    if (it != params.end()) {
+        return it->second.cast<T>();
+    }
+    else {
+        throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given"));
+    }
+}
+
+inline void print_params(const IndexParams& params)
+{
+    IndexParams::const_iterator it;
+
+    for(it=params.begin(); it!=params.end(); ++it) {
+        std::cout << it->first << " : " << it->second << std::endl;
+    }
+}
+
+
+
+}
+
+
+#endif /* OPENCV_FLANN_PARAMS_H_ */
diff --git a/OpenCV/Headers/flann/random.h b/OpenCV/Headers/flann/random.h
new file mode 100644
index 0000000000..2a67352da3
--- /dev/null
+++ b/OpenCV/Headers/flann/random.h
@@ -0,0 +1,135 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RANDOM_H +#define OPENCV_FLANN_RANDOM_H + +#include +#include +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Seeds the random number generator + * @param seed Random seed + */ +inline void seed_random(unsigned int seed) +{ + srand(seed); +} + +/* + * Generates a random double value. + */ +/** + * Generates a random double value. 
+ * @param high Upper limit + * @param low Lower limit + * @return Random double value + */ +inline double rand_double(double high = 1.0, double low = 0) +{ + return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Generates a random integer value. + * @param high Upper limit + * @param low Lower limit + * @return Random integer value + */ +inline int rand_int(int high = RAND_MAX, int low = 0) +{ + return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Random number generator that returns a distinct number from + * the [0,n) interval each time. + */ +class UniqueRandom +{ + std::vector vals_; + int size_; + int counter_; + +public: + /** + * Constructor. + * @param n Size of the interval from which to generate + * @return + */ + UniqueRandom(int n) + { + init(n); + } + + /** + * Initializes the number generator. + * @param n the size of the interval from which to generate random numbers. + */ + void init(int n) + { + // create and initialize an array of size n + vals_.resize(n); + size_ = n; + for (int i = 0; i < size_; ++i) vals_[i] = i; + + // shuffle the elements in the array + std::random_shuffle(vals_.begin(), vals_.end()); + + counter_ = 0; + } + + /** + * Return a distinct random integer in greater or equal to 0 and less + * than 'n' on each call. It should be called maximum 'n' times. + * Returns: a random integer + */ + int next() + { + if (counter_ == size_) { + return -1; + } + else { + return vals_[counter_++]; + } + } +}; + +} + +#endif //OPENCV_FLANN_RANDOM_H + + diff --git a/OpenCV/Headers/flann/result_set.h b/OpenCV/Headers/flann/result_set.h new file mode 100644 index 0000000000..7bb709b76e --- /dev/null +++ b/OpenCV/Headers/flann/result_set.h @@ -0,0 +1,543 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. 
Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RESULTSET_H +#define OPENCV_FLANN_RESULTSET_H + +#include +#include +#include +#include +#include +#include + +namespace cvflann +{ + +/* This record represents a branch point when finding neighbors in + the tree. It contains a record of the minimum distance to the query + point, as well as the node at which the search resumes. + */ + +template +struct BranchStruct +{ + T node; /* Tree node at which search resumes */ + DistanceType mindist; /* Minimum distance to query for all nodes below. 
*/ + + BranchStruct() {} + BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {} + + bool operator<(const BranchStruct& rhs) const + { + return mindist +class ResultSet +{ +public: + virtual ~ResultSet() {} + + virtual bool full() const = 0; + + virtual void addPoint(DistanceType dist, int index) = 0; + + virtual DistanceType worstDist() const = 0; + +}; + +/** + * KNNSimpleResultSet does not ensure that the element it holds are unique. + * Is used in those cases where the nearest neighbour algorithm used does not + * attempt to insert the same element multiple times. + */ +template +class KNNSimpleResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i=count; i>0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) +#else + if (dists[i-1]>dist) +#endif + { + if (i +class KNNResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType 
dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i = count; i > 0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) ) +#else + if (dists[i-1]<=dist) +#endif + { + // Check for duplicate indices + int j = i - 1; + while ((j >= 0) && (dists[j] == dist)) { + if (indices[j] == index) { + return; + } + --j; + } + break; + } + } + + if (count < capacity) ++count; + for (int j = count-1; j > i; --j) { + dists[j] = dists[j-1]; + indices[j] = indices[j-1]; + } + dists[i] = dist; + indices[i] = index; + worst_distance_ = dists[capacity-1]; + } + + DistanceType worstDist() const + { + return worst_distance_; + } +}; + + +/** + * A result-set class used when performing a radius based search. + */ +template +class RadiusResultSet : public ResultSet +{ + DistanceType radius; + int* indices; + DistanceType* dists; + size_t capacity; + size_t count; + +public: + RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) : + radius(radius_), indices(indices_), dists(dists_), capacity(capacity_) + { + init(); + } + + ~RadiusResultSet() + { + } + + void init() + { + count = 0; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return true; + } + + void addPoint(DistanceType dist, int index) + { + if (dist0)&&(count < capacity)) { + dists[count] = dist; + indices[count] = index; + } + count++; + } + } + + DistanceType worstDist() const + { + return radius; + } + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class UniqueResultSet : public ResultSet +{ +public: + struct DistIndex + { + DistIndex(DistanceType dist, unsigned int index) : + dist_(dist), index_(index) + { + } + bool operator<(const DistIndex dist_index) const 
+ { + return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_); + } + DistanceType dist_; + unsigned int index_; + }; + + /** Default cosntructor */ + UniqueResultSet() : + worst_distance_(std::numeric_limits::max()) + { + } + + /** Check the status of the set + * @return true if we have k NN + */ + inline bool full() const + { + return is_full_; + } + + /** Remove all elements in the set + */ + virtual void clear() = 0; + + /** Copy the set to two C arrays + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + if (n_neighbors < 0) { + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + else { + int i = 0; + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + } + + /** Copy the set to two C arrays but sort it according to the distance first + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + copy(indices, dist, n_neighbors); + } + + /** The number of neighbors in the set + * @return + */ + size_t size() const + { + return dist_indices_.size(); + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() 
const + { + return worst_distance_; + } +protected: + /** Flag to say if the set is full */ + bool is_full_; + + /** The worst distance found so far */ + DistanceType worst_distance_; + + /** The best candidates so far */ + std::set dist_indices_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class KNNUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity) + { + this->is_full_ = false; + this->clear(); + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + inline void addPoint(DistanceType dist, int index) + { + // Don't do anything if we are worse than the worst + if (dist >= worst_distance_) return; + dist_indices_.insert(DistIndex(dist, index)); + + if (is_full_) { + if (dist_indices_.size() > capacity_) { + dist_indices_.erase(*dist_indices_.rbegin()); + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + else if (dist_indices_.size() == capacity_) { + is_full_ = true; + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = std::numeric_limits::max(); + is_full_ = false; + } + +protected: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::is_full_; + using UniqueResultSet::worst_distance_; + using UniqueResultSet::dist_indices_; + + /** The number of neighbors to keep */ + unsigned int capacity_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the radius 
nearest neighbors + * It is more accurate than RadiusResult as it is not limited in the number of neighbors + */ +template +class RadiusUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + RadiusUniqueResultSet(DistanceType radius) : + radius_(radius) + { + is_full_ = true; + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + void addPoint(DistanceType dist, int index) + { + if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index)); + } + + /** Remove all elements in the set + */ + inline void clear() + { + dist_indices_.clear(); + } + + + /** Check the status of the set + * @return alwys false + */ + inline bool full() const + { + return true; + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() const + { + return radius_; + } +private: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::dist_indices_; + using UniqueResultSet::is_full_; + + /** The furthest distance a neighbor can be */ + DistanceType radius_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors within a radius distance + */ +template +class KNNRadiusUniqueResultSet : public KNNUniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius) + { + this->capacity_ = capacity; + this->radius_ = radius; + this->dist_indices_.reserve(capacity_); + this->clear(); + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = radius_; + is_full_ = false; + } +private: + using 
KNNUniqueResultSet::dist_indices_; + using KNNUniqueResultSet::is_full_; + using KNNUniqueResultSet::worst_distance_; + + /** The maximum number of neighbors to consider */ + unsigned int capacity_; + + /** The maximum distance of a neighbor */ + DistanceType radius_; +}; +} + +#endif //OPENCV_FLANN_RESULTSET_H + diff --git a/OpenCV/Headers/flann/sampling.h b/OpenCV/Headers/flann/sampling.h new file mode 100644 index 0000000000..396f177ae3 --- /dev/null +++ b/OpenCV/Headers/flann/sampling.h @@ -0,0 +1,81 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_SAMPLING_H_ +#define OPENCV_FLANN_SAMPLING_H_ + +#include "matrix.h" +#include "random.h" + +namespace cvflann +{ + +template +Matrix random_sample(Matrix& srcMatrix, long size, bool remove = false) +{ + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (long i=0; i +Matrix random_sample(const Matrix& srcMatrix, size_t size) +{ + UniqueRandom rand((int)srcMatrix.rows); + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (size_t i=0; i +#include + +#include "general.h" +#include "nn_index.h" + +#ifdef FLANN_SIGNATURE_ +#undef FLANN_SIGNATURE_ +#endif +#define FLANN_SIGNATURE_ "FLANN_INDEX" + +namespace cvflann +{ + +template +struct Datatype {}; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT32; } }; +template<> 
+struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT64; } }; + + +/** + * Structure representing the index header. + */ +struct IndexHeader +{ + char signature[16]; + char version[16]; + flann_datatype_t data_type; + flann_algorithm_t index_type; + size_t rows; + size_t cols; +}; + +/** + * Saves index header to stream + * + * @param stream - Stream to save to + * @param index - The index to save + */ +template +void save_header(FILE* stream, const NNIndex& index) +{ + IndexHeader header; + memset(header.signature, 0, sizeof(header.signature)); + strcpy(header.signature, FLANN_SIGNATURE_); + memset(header.version, 0, sizeof(header.version)); + strcpy(header.version, FLANN_VERSION_); + header.data_type = Datatype::type(); + header.index_type = index.getType(); + header.rows = index.size(); + header.cols = index.veclen(); + + std::fwrite(&header, sizeof(header),1,stream); +} + + +/** + * + * @param stream - Stream to load from + * @return Index header + */ +inline IndexHeader load_header(FILE* stream) +{ + IndexHeader header; + size_t read_size = fread(&header,sizeof(header),1,stream); + + if (read_size!=(size_t)1) { + throw FLANNException("Invalid index file, cannot read"); + } + + if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { + throw FLANNException("Invalid index file, wrong signature"); + } + + return header; + +} + + +template +void save_value(FILE* stream, const T& value, size_t count = 1) +{ + fwrite(&value, sizeof(value),count, stream); +} + +template +void save_value(FILE* stream, const cvflann::Matrix& value) +{ + fwrite(&value, sizeof(value),1, stream); + fwrite(value.data, sizeof(T),value.rows*value.cols, stream); +} + +template +void save_value(FILE* stream, const std::vector& value) +{ + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); +} + +template +void load_value(FILE* stream, T& value, size_t count = 1) +{ + size_t read_cnt = fread(&value, sizeof(value), 
count, stream); + if (read_cnt != count) { + throw FLANNException("Cannot read from file"); + } +} + +template +void load_value(FILE* stream, cvflann::Matrix& value) +{ + size_t read_cnt = fread(&value, sizeof(value), 1, stream); + if (read_cnt != 1) { + throw FLANNException("Cannot read from file"); + } + value.data = new T[value.rows*value.cols]; + read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream); + if (read_cnt != (size_t)(value.rows*value.cols)) { + throw FLANNException("Cannot read from file"); + } +} + + +template +void load_value(FILE* stream, std::vector& value) +{ + size_t size; + size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + throw FLANNException("Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt != size) { + throw FLANNException("Cannot read from file"); + } +} + +} + +#endif /* OPENCV_FLANN_SAVING_H_ */ diff --git a/OpenCV/Headers/flann/simplex_downhill.h b/OpenCV/Headers/flann/simplex_downhill.h new file mode 100644 index 0000000000..145901ab0d --- /dev/null +++ b/OpenCV/Headers/flann/simplex_downhill.h @@ -0,0 +1,186 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ +#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ + +namespace cvflann +{ + +/** + Adds val to array vals (and point to array points) and keeping the arrays sorted by vals. + */ +template +void addValue(int pos, float val, float* vals, T* point, T* points, int n) +{ + vals[pos] = val; + for (int i=0; i0 && vals[j] +float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL ) +{ + const int MAX_ITERATIONS = 10; + + assert(n>0); + + T* p_o = new T[n]; + T* p_r = new T[n]; + T* p_e = new T[n]; + + int alpha = 1; + + int iterations = 0; + + bool ownVals = false; + if (vals == NULL) { + ownVals = true; + vals = new float[n+1]; + for (int i=0; i MAX_ITERATIONS) break; + + // compute average of simplex points (except the highest point) + for (int j=0; j=vals[0])&&(val_r=vals[n]) { + for (int i=0; i + + +namespace cvflann +{ + +/** + * A start-stop timer class. + * + * Can be used to time portions of code. + */ +class StartStopTimer +{ + clock_t startTime; + +public: + /** + * Value of the timer. + */ + double value; + + + /** + * Constructor. 
+ */ + StartStopTimer() + { + reset(); + } + + /** + * Starts the timer. + */ + void start() + { + startTime = clock(); + } + + /** + * Stops the timer and updates timer value. + */ + void stop() + { + clock_t stopTime = clock(); + value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC; + } + + /** + * Resets the timer value to 0. + */ + void reset() + { + value = 0; + } + +}; + +} + +#endif // FLANN_TIMER_H diff --git a/OpenCV/Headers/gpu/device/border_interpolate.hpp b/OpenCV/Headers/gpu/device/border_interpolate.hpp new file mode 100644 index 0000000000..2343ccab2a --- /dev/null +++ b/OpenCV/Headers/gpu/device/border_interpolate.hpp @@ -0,0 +1,714 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ +#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" + +namespace cv { namespace gpu { namespace device +{ + ////////////////////////////////////////////////////////////// + // BrdConstant + + template struct BrdRowConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits::all(0)) : width(width_), val(val_) {} + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return x >= 0 ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return x < width ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return (x >= 0 && x < width) ? 
saturate_cast(data[x]) : val; + } + + const int width; + const D val; + }; + + template struct BrdColConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits::all(0)) : height(height_), val(val_) {} + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return y >= 0 ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return y < height ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return (y >= 0 && y < height) ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + const int height; + const D val; + }; + + template struct BrdConstant + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits::all(0)) : height(height_), width(width_), val(val_) + { + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast(((const T*)((const uchar*)data + y * step))[x]) : val; + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? 
saturate_cast(src(y, x)) : val; + } + + const int height; + const int width; + const D val; + }; + + ////////////////////////////////////////////////////////////// + // BrdReplicate + + template struct BrdRowReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + 
return saturate_cast(*(const T*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReplicate + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect101 + + template struct BrdRowReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {} + + __device__ 
__forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReflect101 + { + typedef D result_type; + + __host__ 
__device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect + + template struct BrdRowReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1); + } + + __device__ 
__forceinline__ int idx_col(int x) const + { + return idx_col_high(::abs(x) - (x < 0)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(::abs(y) - (y < 0)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReflect + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), 
last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/; + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (last_col - ::abs(last_col - x) + (x > last_col)); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdWrap + + template struct BrdRowWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {} + template __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) * x + (x >= width) * (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); 
+ } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int width; + }; + + template struct BrdColWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {} + template __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) * y + (y >= height) * (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int height; + }; + + template struct BrdWrap + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) : + height(height_), width(width_) + { + } + template + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) : + height(height_), width(width_) + { + } + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) 
* y + (y >= height) * (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) * x + (x >= width) * (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int height; + const int width; + }; + + ////////////////////////////////////////////////////////////// + // BorderReader + + template struct BorderReader + { + typedef typename B::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {} + + __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const + { + return b.at(y, x, ptr); + } + + const Ptr2D ptr; + const B b; + }; + + // under win32 there is some bug with templated types that passed as kernel parameters + // with this specialization all works fine + template struct BorderReader< Ptr2D, BrdConstant > + { + typedef typename BrdConstant::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant& b) : + src(src_), height(b.height), width(b.width), val(b.val) + { + } + + __device__ __forceinline__ D operator ()(index_type y, index_type x) const + { + return (x >= 0 && x < width && y 
>= 0 && y < height) ? saturate_cast(src(y, x)) : val; + } + + const Ptr2D src; + const int height; + const int width; + const D val; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ diff --git a/OpenCV/Headers/gpu/device/color.hpp b/OpenCV/Headers/gpu/device/color.hpp new file mode 100644 index 0000000000..f659e34c1d --- /dev/null +++ b/OpenCV/Headers/gpu/device/color.hpp @@ -0,0 +1,221 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_COLOR_HPP__ +#define __OPENCV_GPU_COLOR_HPP__ + +#include "detail/color_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + // All OPENCV_GPU_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements + // template class ColorSpace1_to_ColorSpace2_traits + // { + // typedef ... 
functor_type; + // static __host__ __device__ functor_type create_functor(); + // }; + + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2) + + #undef OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3) + OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4) + + #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5) + OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6) + + #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS + + 
OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 2) + + #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS + + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 2) + + #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0) + 
OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS + + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS + + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2) + 
OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS + + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS + + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ diff --git 
a/OpenCV/Headers/gpu/device/common.hpp b/OpenCV/Headers/gpu/device/common.hpp new file mode 100644 index 0000000000..141467fdc8 --- /dev/null +++ b/OpenCV/Headers/gpu/device/common.hpp @@ -0,0 +1,114 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_COMMON_HPP__ +#define __OPENCV_GPU_COMMON_HPP__ + +#include +#include "opencv2/core/cuda_devptrs.hpp" + +#ifndef CV_PI + #define CV_PI 3.1415926535897932384626433832795 +#endif + +#ifndef CV_PI_F + #ifndef CV_PI + #define CV_PI_F 3.14159265f + #else + #define CV_PI_F ((float)CV_PI) + #endif +#endif + +#if defined(__GNUC__) + #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__) +#else /* defined(__CUDACC__) || defined(__MSVC__) */ + #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__) +#endif + +namespace cv { namespace gpu +{ + void error(const char *error_string, const char *file, const int line, const char *func); + + template static inline bool isAligned(const T* ptr, size_t size) + { + return reinterpret_cast(ptr) % size == 0; + } + + static inline bool isAligned(size_t step, size_t size) + { + return step % size == 0; + } +}} + +static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "") +{ + if (cudaSuccess != err) + cv::gpu::error(cudaGetErrorString(err), file, line, func); +} + +#ifdef __CUDACC__ + +namespace cv { namespace gpu +{ + __host__ __device__ __forceinline__ int divUp(int total, int grain) + { + return (total + grain - 1) / grain; + } + + namespace device + { + typedef unsigned char uchar; + typedef unsigned short ushort; + typedef signed char schar; + typedef unsigned int uint; + + 
template inline void bindTexture(const textureReference* tex, const PtrStepSz& img) + { + cudaChannelFormatDesc desc = cudaCreateChannelDesc(); + cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) ); + } + } +}} + +#endif // __CUDACC__ + +#endif // __OPENCV_GPU_COMMON_HPP__ diff --git a/OpenCV/Headers/gpu/device/datamov_utils.hpp b/OpenCV/Headers/gpu/device/datamov_utils.hpp new file mode 100644 index 0000000000..e05a22477c --- /dev/null +++ b/OpenCV/Headers/gpu/device/datamov_utils.hpp @@ -0,0 +1,105 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__ +#define __OPENCV_GPU_DATAMOV_UTILS_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 + + // for Fermi memory space is detected automatically + template struct ForceGlob + { + __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; } + }; + + #else // __CUDA_ARCH__ >= 200 + + #if defined(_WIN64) || defined(__LP64__) + // 64-bit register modifier for inlined asm + #define OPENCV_GPU_ASM_PTR "l" + #else + // 32-bit register modifier for inlined asm + #define OPENCV_GPU_ASM_PTR "r" + #endif + + template struct ForceGlob; + + #define OPENCV_GPU_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \ + template <> struct ForceGlob \ + { \ + __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \ + { \ + asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_GPU_ASM_PTR(ptr + offset)); \ + } \ + }; + + #define OPENCV_GPU_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \ + template <> struct ForceGlob \ + { \ + __device__ __forceinline__ static void Load(const base_type* 
ptr, int offset, base_type& val) \ + { \ + asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast(&val)) : OPENCV_GPU_ASM_PTR(ptr + offset)); \ + } \ + }; + + OPENCV_GPU_DEFINE_FORCE_GLOB_B(uchar, u8) + OPENCV_GPU_DEFINE_FORCE_GLOB_B(schar, s8) + OPENCV_GPU_DEFINE_FORCE_GLOB_B(char, b8) + OPENCV_GPU_DEFINE_FORCE_GLOB (ushort, u16, h) + OPENCV_GPU_DEFINE_FORCE_GLOB (short, s16, h) + OPENCV_GPU_DEFINE_FORCE_GLOB (uint, u32, r) + OPENCV_GPU_DEFINE_FORCE_GLOB (int, s32, r) + OPENCV_GPU_DEFINE_FORCE_GLOB (float, f32, f) + OPENCV_GPU_DEFINE_FORCE_GLOB (double, f64, d) + + #undef OPENCV_GPU_DEFINE_FORCE_GLOB + #undef OPENCV_GPU_DEFINE_FORCE_GLOB_B + #undef OPENCV_GPU_ASM_PTR + + #endif // __CUDA_ARCH__ >= 200 +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__ diff --git a/OpenCV/Headers/gpu/device/detail/color_detail.hpp b/OpenCV/Headers/gpu/device/detail/color_detail.hpp new file mode 100644 index 0000000000..981e62335c --- /dev/null +++ b/OpenCV/Headers/gpu/device/detail/color_detail.hpp @@ -0,0 +1,1542 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_COLOR_DETAIL_HPP__ +#define __OPENCV_GPU_COLOR_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../saturate_cast.hpp" +#include "../limits.hpp" +#include "../functional.hpp" + +namespace cv { namespace gpu { namespace device +{ + #ifndef CV_DESCALE + #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n)) + #endif + + namespace color_detail + { + template struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ T max() { return numeric_limits::max(); } + static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); } + }; + + template<> struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ float max() { return 1.f; } + static __device__ __forceinline__ float half() { return 0.5f; } + }; + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + } + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + vec.w = val; + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return ColorChannel::max(); + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return vec.w; + } + + enum + { + yuv_shift = 14, + xyz_shift = 12, + R2Y = 4899, + G2Y = 9617, + B2Y = 1868, + BLOCK_SIZE = 256 + }; + } + +////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// + + namespace color_detail + { + template struct RGB2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + dst.x = (&src.x)[bidx]; + dst.y = src.y; + dst.z = (&src.x)[bidx^2]; + setAlpha(dst, getAlpha(src)); + + return dst; + } + + __device__ __forceinline__ RGB2RGB() + : unary_function::vec_type, typename 
TypeVec::vec_type>(){} + + __device__ __forceinline__ RGB2RGB(const RGB2RGB& other_) + :unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template <> struct RGB2RGB : unary_function + { + __device__ uint operator()(uint src) const + { + uint dst = 0; + + dst |= (0xffu & (src >> 16)); + dst |= (0xffu & (src >> 8)) << 8; + dst |= (0xffu & (src)) << 16; + dst |= (0xffu & (src >> 24)) << 24; + + return dst; + } + + __device__ __forceinline__ RGB2RGB():unary_function(){} + __device__ __forceinline__ RGB2RGB(const RGB2RGB& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB ////////// + + namespace color_detail + { + template struct RGB2RGB5x5Converter; + template struct RGB2RGB5x5Converter<6, bidx> + { + static __device__ __forceinline__ ushort cvt(const uchar3& src) + { + return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~3) << 3) | (((&src.x)[bidx^2] & ~7) << 8)); + } + + static __device__ __forceinline__ ushort cvt(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8)); + } + }; + + template struct RGB2RGB5x5Converter<5, bidx> + { + static __device__ __forceinline__ ushort cvt(const uchar3& src) + { + return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~7) << 2) | (((&src.x)[bidx^2] & ~7) << 7)); + } + + static __device__ __forceinline__ ushort cvt(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + uint a = 0xffu & (src >> 24); + return (ushort)((b >> 3) | ((g & ~7) << 2) | 
((r & ~7) << 7) | (a * 0x8000)); + } + }; + + template struct RGB2RGB5x5; + + template struct RGB2RGB5x5<3, bidx,green_bits> : unary_function + { + __device__ __forceinline__ ushort operator()(const uchar3& src) const + { + return RGB2RGB5x5Converter::cvt(src); + } + + __device__ __forceinline__ RGB2RGB5x5():unary_function(){} + __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function(){} + }; + + template struct RGB2RGB5x5<4, bidx,green_bits> : unary_function + { + __device__ __forceinline__ ushort operator()(uint src) const + { + return RGB2RGB5x5Converter::cvt(src); + } + + __device__ __forceinline__ RGB2RGB5x5():unary_function(){} + __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct RGB5x52RGBConverter; + + template struct RGB5x52RGBConverter<5, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (&dst.x)[bidx] = src << 3; + dst.y = (src >> 2) & ~7; + (&dst.x)[bidx ^ 2] = (src >> 7) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0; + + dst |= (0xffu & (src << 3)) << (bidx * 8); + dst |= (0xffu & ((src >> 2) & ~7)) << 8; + dst |= (0xffu & ((src >> 7) & ~7)) << ((bidx ^ 2) * 8); + dst |= ((src & 0x8000) * 0xffu) << 24; + } + }; + + template struct RGB5x52RGBConverter<6, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (&dst.x)[bidx] = src << 3; + dst.y = (src >> 3) & ~3; + (&dst.x)[bidx ^ 2] = (src >> 8) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0xffu << 24; + + dst |= (0xffu & (src << 3)) << (bidx 
* 8); + dst |= (0xffu &((src >> 3) & ~3)) << 8; + dst |= (0xffu & ((src >> 8) & ~7)) << ((bidx ^ 2) * 8); + } + }; + + template struct RGB5x52RGB; + + template struct RGB5x52RGB<3, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uchar3 operator()(ushort src) const + { + uchar3 dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + __device__ __forceinline__ RGB5x52RGB():unary_function(){} + __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function(){} + + }; + + template struct RGB5x52RGB<4, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uint operator()(ushort src) const + { + uint dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + __device__ __forceinline__ RGB5x52RGB():unary_function(){} + __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB5x52RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Grayscale to Color //////////////////////////////// + + namespace color_detail + { + template struct Gray2RGB : unary_function::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(T src) const + { + typename TypeVec::vec_type dst; + + dst.z = dst.y = dst.x = src; + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ Gray2RGB():unary_function::vec_type>(){} + __device__ __forceinline__ Gray2RGB(const Gray2RGB& other_) + : unary_function::vec_type>(){} + }; + + template <> struct Gray2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + uint dst = 0xffu << 24; + + dst |= src; + dst |= src << 8; + dst |= src << 16; + + return dst; + } + __device__ __forceinline__ 
Gray2RGB():unary_function(){} + __device__ __forceinline__ Gray2RGB(const Gray2RGB& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::Gray2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct Gray2RGB5x5Converter; + template<> struct Gray2RGB5x5Converter<6> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8)); + } + }; + + template<> struct Gray2RGB5x5Converter<5> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + t >>= 3; + return (ushort)(t | (t << 5) | (t << 10)); + } + }; + + template struct Gray2RGB5x5 : unary_function + { + __device__ __forceinline__ ushort operator()(uint src) const + { + return Gray2RGB5x5Converter::cvt(src); + } + + __device__ __forceinline__ Gray2RGB5x5():unary_function(){} + __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::Gray2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Color to Grayscale //////////////////////////////// + + namespace color_detail + { + template struct RGB5x52GrayConverter; + template <> struct RGB5x52GrayConverter<6> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift); + } + }; + + template <> struct RGB5x52GrayConverter<5> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 
0xf8) * B2Y + ((t >> 2) & 0xf8) * G2Y + ((t >> 7) & 0xf8) * R2Y, yuv_shift); + } + }; + + template struct RGB5x52Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB5x52GrayConverter::cvt(src); + } + __device__ __forceinline__ RGB5x52Gray() : unary_function(){} + __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB5x52Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template static __device__ __forceinline__ T RGB2GrayConvert(const T* src) + { + return (T)CV_DESCALE((unsigned)(src[bidx] * B2Y + src[1] * G2Y + src[bidx^2] * R2Y), yuv_shift); + } + + template static __device__ __forceinline__ uchar RGB2GrayConvert(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift); + } + + template static __device__ __forceinline__ float RGB2GrayConvert(const float* src) + { + return src[bidx] * 0.114f + src[1] * 0.587f + src[bidx^2] * 0.299f; + } + + template struct RGB2Gray : unary_function::vec_type, T> + { + __device__ __forceinline__ T operator()(const typename TypeVec::vec_type& src) const + { + return RGB2GrayConvert(&src.x); + } + __device__ __forceinline__ RGB2Gray() : unary_function::vec_type, T>(){} + __device__ __forceinline__ RGB2Gray(const RGB2Gray& other_) + : unary_function::vec_type, T>(){} + }; + + template struct RGB2Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB2GrayConvert(src); + } + __device__ __forceinline__ RGB2Gray() : unary_function(){} + __device__ __forceinline__ RGB2Gray(const RGB2Gray& 
other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YUV ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f }; + __constant__ int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 }; + + template static __device__ void RGB2YUVConvert(const T* src, D& dst) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE(src[0] * c_RGB2YUVCoeffs_i[bidx^2] + src[1] * c_RGB2YUVCoeffs_i[1] + src[2] * c_RGB2YUVCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YUVCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YUVCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast(Y); + dst.y = saturate_cast(Cr); + dst.z = saturate_cast(Cb); + } + + template static __device__ __forceinline__ void RGB2YUVConvert(const float* src, D& dst) + { + dst.x = src[0] * c_RGB2YUVCoeffs_f[bidx^2] + src[1] * c_RGB2YUVCoeffs_f[1] + src[2] * c_RGB2YUVCoeffs_f[bidx]; + dst.y = (src[bidx^2] - dst.x) * c_RGB2YUVCoeffs_f[3] + ColorChannel::half(); + dst.z = (src[bidx] - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YUV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YUVConvert(&src.x, dst); + return dst; + } + __device__ __forceinline__ RGB2YUV() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ RGB2YUV(const RGB2YUV& other_) + : 
unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2YUV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f }; + __constant__ int c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 }; + + template static __device__ void YUV2RGBConvert(const T& src, D* dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[2] + + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = src.x + CV_DESCALE((src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + } + + template static __device__ uint YUV2RGBConvert(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[2] + + (y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template static __device__ __forceinline__ void YUV2RGBConvert(const T& src, float* dst) + { + dst[bidx] = src.x + (src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_f[3]; + + dst[1] = src.x + (src.z - 
ColorChannel::half()) * c_YUV2RGBCoeffs_f[2] + + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[1]; + + dst[bidx^2] = src.x + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[0]; + } + + template struct YUV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YUV2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ YUV2RGB() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ YUV2RGB(const YUV2RGB& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct YUV2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YUV2RGBConvert(src); + } + __device__ __forceinline__ YUV2RGB() : unary_function(){} + __device__ __forceinline__ YUV2RGB(const YUV2RGB& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::YUV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YCrCb ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + __constant__ int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241}; + + template static __device__ void RGB2YCrCbConvert(const T* src, D& dst) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE(src[0] * c_RGB2YCrCbCoeffs_i[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_i[1] + src[2] * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YCrCbCoeffs_i[3] + 
delta, yuv_shift); + const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast(Y); + dst.y = saturate_cast(Cr); + dst.z = saturate_cast(Cb); + } + + template static __device__ uint RGB2YCrCbConvert(uint src) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE((0xffu & src) * c_RGB2YCrCbCoeffs_i[bidx^2] + (0xffu & (src >> 8)) * c_RGB2YCrCbCoeffs_i[1] + (0xffu & (src >> 16)) * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE(((0xffu & (src >> ((bidx ^ 2) * 8))) - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE(((0xffu & (src >> (bidx * 8))) - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + uint dst = 0; + + dst |= saturate_cast(Y); + dst |= saturate_cast(Cr) << 8; + dst |= saturate_cast(Cb) << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2YCrCbConvert(const float* src, D& dst) + { + dst.x = src[0] * c_RGB2YCrCbCoeffs_f[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_f[1] + src[2] * c_RGB2YCrCbCoeffs_f[bidx]; + dst.y = (src[bidx^2] - dst.x) * c_RGB2YCrCbCoeffs_f[3] + ColorChannel::half(); + dst.z = (src[bidx] - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YCrCb + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YCrCbConvert(&src.x, dst); + return dst; + } + __device__ __forceinline__ RGB2YCrCb() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct RGB2YCrCb : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return RGB2YCrCbConvert(src); + } + + __device__ __forceinline__ RGB2YCrCb() : 
unary_function(){} + __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2YCrCb functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YCrCb2RGBCoeffs_f[5] = {1.403f, -0.714f, -0.344f, 1.773f}; + __constant__ int c_YCrCb2RGBCoeffs_i[5] = {22987, -11698, -5636, 29049}; + + template static __device__ void YCrCb2RGBConvert(const T& src, D* dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[2] + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = src.x + CV_DESCALE((src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + } + + template static __device__ uint YCrCb2RGBConvert(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[2] + (y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template __device__ __forceinline__ void YCrCb2RGBConvert(const T& src, float* dst) + { + dst[bidx] = src.x + (src.z - ColorChannel::half()) * 
c_YCrCb2RGBCoeffs_f[3]; + dst[1] = src.x + (src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[2] + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[1]; + dst[bidx^2] = src.x + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[0]; + } + + template struct YCrCb2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YCrCb2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ YCrCb2RGB() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct YCrCb2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YCrCb2RGBConvert(src); + } + __device__ __forceinline__ YCrCb2RGB() : unary_function(){} + __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::YCrCb2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2XYZ_D65f[9] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f }; + __constant__ int c_RGB2XYZ_D65i[9] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 }; + + template static __device__ __forceinline__ void RGB2XYZConvert(const T* src, D& dst) + { + dst.x = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[0] + src[1] * c_RGB2XYZ_D65i[1] + src[bidx] * 
c_RGB2XYZ_D65i[2], xyz_shift)); + dst.y = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[3] + src[1] * c_RGB2XYZ_D65i[4] + src[bidx] * c_RGB2XYZ_D65i[5], xyz_shift)); + dst.z = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[6] + src[1] * c_RGB2XYZ_D65i[7] + src[bidx] * c_RGB2XYZ_D65i[8], xyz_shift)); + } + + template static __device__ __forceinline__ uint RGB2XYZConvert(uint src) + { + const uint b = 0xffu & (src >> (bidx * 8)); + const uint g = 0xffu & (src >> 8); + const uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + + const uint x = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift)); + const uint y = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift)); + const uint z = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift)); + + uint dst = 0; + + dst |= x; + dst |= y << 8; + dst |= z << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2XYZConvert(const float* src, D& dst) + { + dst.x = src[bidx^2] * c_RGB2XYZ_D65f[0] + src[1] * c_RGB2XYZ_D65f[1] + src[bidx] * c_RGB2XYZ_D65f[2]; + dst.y = src[bidx^2] * c_RGB2XYZ_D65f[3] + src[1] * c_RGB2XYZ_D65f[4] + src[bidx] * c_RGB2XYZ_D65f[5]; + dst.z = src[bidx^2] * c_RGB2XYZ_D65f[6] + src[1] * c_RGB2XYZ_D65f[7] + src[bidx] * c_RGB2XYZ_D65f[8]; + } + + template struct RGB2XYZ + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2XYZConvert(&src.x, dst); + + return dst; + } + __device__ __forceinline__ RGB2XYZ() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct RGB2XYZ : unary_function + { + __device__ 
__forceinline__ uint operator()(uint src) const + { + return RGB2XYZConvert(src); + } + __device__ __forceinline__ RGB2XYZ() : unary_function(){} + __device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2XYZ functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_XYZ2sRGB_D65f[9] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f }; + __constant__ int c_XYZ2sRGB_D65i[9] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 }; + + template static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, D* dst) + { + dst[bidx^2] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[0] + src.y * c_XYZ2sRGB_D65i[1] + src.z * c_XYZ2sRGB_D65i[2], xyz_shift)); + dst[1] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift)); + dst[bidx] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift)); + } + + template static __device__ __forceinline__ uint XYZ2RGBConvert(uint src) + { + const int x = 0xff & src; + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const uint r = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[0] + y * c_XYZ2sRGB_D65i[1] + z * c_XYZ2sRGB_D65i[2], xyz_shift)); + const uint g = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[3] + y * c_XYZ2sRGB_D65i[4] + z * c_XYZ2sRGB_D65i[5], xyz_shift)); + const uint b = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[6] + y * c_XYZ2sRGB_D65i[7] + z * c_XYZ2sRGB_D65i[8], xyz_shift)); + + uint dst = 0xffu << 24; + + dst |= b << (bidx * 8); + dst |= g << 8; + dst |= r << ((bidx ^ 2) * 8); 
+ + return dst; + } + + template static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, float* dst) + { + dst[bidx^2] = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2]; + dst[1] = src.x * c_XYZ2sRGB_D65f[3] + src.y * c_XYZ2sRGB_D65f[4] + src.z * c_XYZ2sRGB_D65f[5]; + dst[bidx] = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8]; + } + + template struct XYZ2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + XYZ2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ XYZ2RGB() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct XYZ2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return XYZ2RGBConvert(src); + } + __device__ __forceinline__ XYZ2RGB() : unary_function(){} + __device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::XYZ2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> HSV /////////////////////////////////////// + + namespace color_detail + { + __constant__ int c_HsvDivTable [256] = {0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053, 104448, 94953, 87040, 80345, 74606, 69632, 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693, 32640, 31651, 30720, 
29842, 29013, 28229, 27486, 26782, 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221, 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777, 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514, 7461, 7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569, 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249, 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370, 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096}; + __constant__ int c_HsvDivTable180[256] = {0, 122880, 61440, 40960, 30720, 24576, 20480, 17554, 15360, 13653, 12288, 11171, 10240, 9452, 8777, 8192, 7680, 7228, 6827, 6467, 6144, 5851, 5585, 5343, 5120, 4915, 4726, 4551, 4389, 4237, 4096, 3964, 3840, 3724, 3614, 3511, 3413, 3321, 3234, 3151, 3072, 2997, 2926, 2858, 2793, 2731, 2671, 2614, 2560, 2508, 2458, 2409, 2363, 2318, 2276, 2234, 2194, 2156, 2119, 2083, 2048, 2014, 1982, 1950, 1920, 1890, 1862, 1834, 1807, 1781, 1755, 1731, 1707, 1683, 1661, 1638, 1617, 1596, 1575, 1555, 1536, 1517, 1499, 1480, 1463, 1446, 1429, 1412, 1396, 1381, 1365, 
1350, 1336, 1321, 1307, 1293, 1280, 1267, 1254, 1241, 1229, 1217, 1205, 1193, 1182, 1170, 1159, 1148, 1138, 1127, 1117, 1107, 1097, 1087, 1078, 1069, 1059, 1050, 1041, 1033, 1024, 1016, 1007, 999, 991, 983, 975, 968, 960, 953, 945, 938, 931, 924, 917, 910, 904, 897, 890, 884, 878, 871, 865, 859, 853, 847, 842, 836, 830, 825, 819, 814, 808, 803, 798, 793, 788, 783, 778, 773, 768, 763, 759, 754, 749, 745, 740, 736, 731, 727, 723, 719, 714, 710, 706, 702, 698, 694, 690, 686, 683, 679, 675, 671, 668, 664, 661, 657, 654, 650, 647, 643, 640, 637, 633, 630, 627, 624, 621, 617, 614, 611, 608, 605, 602, 599, 597, 594, 591, 588, 585, 582, 580, 577, 574, 572, 569, 566, 564, 561, 559, 556, 554, 551, 549, 546, 544, 541, 539, 537, 534, 532, 530, 527, 525, 523, 521, 518, 516, 514, 512, 510, 508, 506, 504, 502, 500, 497, 495, 493, 492, 490, 488, 486, 484, 482}; + __constant__ int c_HsvDivTable256[256] = {0, 174763, 87381, 58254, 43691, 34953, 29127, 24966, 21845, 19418, 17476, 15888, 14564, 13443, 12483, 11651, 10923, 10280, 9709, 9198, 8738, 8322, 7944, 7598, 7282, 6991, 6722, 6473, 6242, 6026, 5825, 5638, 5461, 5296, 5140, 4993, 4855, 4723, 4599, 4481, 4369, 4263, 4161, 4064, 3972, 3884, 3799, 3718, 3641, 3567, 3495, 3427, 3361, 3297, 3236, 3178, 3121, 3066, 3013, 2962, 2913, 2865, 2819, 2774, 2731, 2689, 2648, 2608, 2570, 2533, 2497, 2461, 2427, 2394, 2362, 2330, 2300, 2270, 2241, 2212, 2185, 2158, 2131, 2106, 2081, 2056, 2032, 2009, 1986, 1964, 1942, 1920, 1900, 1879, 1859, 1840, 1820, 1802, 1783, 1765, 1748, 1730, 1713, 1697, 1680, 1664, 1649, 1633, 1618, 1603, 1589, 1574, 1560, 1547, 1533, 1520, 1507, 1494, 1481, 1469, 1456, 1444, 1432, 1421, 1409, 1398, 1387, 1376, 1365, 1355, 1344, 1334, 1324, 1314, 1304, 1295, 1285, 1276, 1266, 1257, 1248, 1239, 1231, 1222, 1214, 1205, 1197, 1189, 1181, 1173, 1165, 1157, 1150, 1142, 1135, 1128, 1120, 1113, 1106, 1099, 1092, 1085, 1079, 1072, 1066, 1059, 1053, 1046, 1040, 1034, 1028, 1022, 1016, 1010, 1004, 999, 993, 987, 982, 976, 971, 
966, 960, 955, 950, 945, 940, 935, 930, 925, 920, 915, 910, 906, 901, 896, 892, 887, 883, 878, 874, 869, 865, 861, 857, 853, 848, 844, 840, 836, 832, 828, 824, 820, 817, 813, 809, 805, 802, 798, 794, 791, 787, 784, 780, 777, 773, 770, 767, 763, 760, 757, 753, 750, 747, 744, 741, 737, 734, 731, 728, 725, 722, 719, 716, 713, 710, 708, 705, 702, 699, 696, 694, 691, 688, 685}; + + template static __device__ void RGB2HSVConvert(const uchar* src, D& dst) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256; + + int b = src[bidx], g = src[1], r = src[bidx^2]; + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + dst.x = saturate_cast(h); + dst.y = (uchar)s; + dst.z = (uchar)v; + } + + template static __device__ uint RGB2HSVConvert(uint src) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? 
c_HsvDivTable180 : c_HsvDivTable256; + + const int b = 0xff & (src >> (bidx * 8)); + const int g = 0xff & (src >> 8); + const int r = 0xff & (src >> ((bidx ^ 2) * 8)); + + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + uint dst = 0; + + dst |= saturate_cast(h); + dst |= (0xffu & s) << 8; + dst |= (0xffu & v) << 16; + + return dst; + } + + template static __device__ void RGB2HSVConvert(const float* src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h, s, v; + + float vmin, diff; + + v = vmin = r; + v = fmax(v, g); + v = fmax(v, b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = v - vmin; + s = diff / (float)(::fabs(v) + numeric_limits::epsilon()); + diff = (float)(60. 
/ (diff + numeric_limits::epsilon())); + + h = (v == r) * (g - b) * diff; + h += (v != r && v == g) * ((b - r) * diff + 120.f); + h += (v != r && v != g) * ((r - g) * diff + 240.f); + h += (h < 0) * 360.f; + + dst.x = h * hscale; + dst.y = s; + dst.z = v; + } + + template struct RGB2HSV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HSVConvert(&src.x, dst); + + return dst; + } + __device__ __forceinline__ RGB2HSV() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ RGB2HSV(const RGB2HSV& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct RGB2HSV : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HSVConvert(src); + } + __device__ __forceinline__ RGB2HSV():unary_function(){} + __device__ __forceinline__ RGB2HSV(const RGB2HSV& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ 
__forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ int c_HsvSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HSV2RGBConvert(const T& src, float* dst) + { + const float hscale = 6.f / hr; + + float h = src.x, s = src.y, v = src.z; + float b = v, g = v, r = v; + + if (s != 0) + { + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector = __float2int_rd(h); + h -= sector; + + if ( (unsigned)sector >= 6u ) + { + sector = 0; + h = 0.f; + } + + float tab[4]; + tab[0] = v; + tab[1] = v * (1.f - s); + tab[2] = v * (1.f - s * h); + tab[3] = v * (1.f - s * (1.f - h)); + + b = tab[c_HsvSectorData[sector][0]]; + g = tab[c_HsvSectorData[sector][1]]; + r = tab[c_HsvSectorData[sector][2]]; + } + + dst[bidx] = b; + dst[1] = g; + dst[bidx^2] = r; + } + + template static __device__ void HSV2RGBConvert(const T& src, uchar* dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HSV2RGBConvert(buf, &buf.x); + + dst[0] = saturate_cast(buf.x * 255.f); + dst[1] = saturate_cast(buf.y * 255.f); + dst[2] = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HSV2RGBConvert(uint src) + { + float3 buf; + + buf.x = src & 0xff; + buf.y = ((src >> 8) & 0xff) * (1.f/255.f); + buf.z = ((src >> 16) & 0xff) * (1.f/255.f); + + HSV2RGBConvert(buf, &buf.x); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct HSV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HSV2RGBConvert(src, &dst.x); + setAlpha(dst, 
ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ HSV2RGB() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ HSV2RGB(const HSV2RGB& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct HSV2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HSV2RGBConvert(src); + } + __device__ __forceinline__ HSV2RGB():unary_function(){} + __device__ __forceinline__ HSV2RGB(const HSV2RGB& other_):unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////////////////////////////////// RGB <-> HLS //////////////////////////////////////// + + namespace color_detail + { + template static __device__ void RGB2HLSConvert(const float* src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h = 0.f, s = 0.f, l; + float vmin, vmax, diff; + + vmax = vmin = r; + vmax = fmax(vmax, g); + vmax = fmax(vmax, 
b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = vmax - vmin; + l = (vmax + vmin) * 0.5f; + + if (diff > numeric_limits::epsilon()) + { + s = (l < 0.5f) * diff / (vmax + vmin); + s += (l >= 0.5f) * diff / (2.0f - vmax - vmin); + + diff = 60.f / diff; + + h = (vmax == r) * (g - b) * diff; + h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f); + h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f); + h += (h < 0.f) * 360.f; + } + + dst.x = h * hscale; + dst.y = l; + dst.z = s; + } + + template static __device__ void RGB2HLSConvert(const uchar* src, D& dst) + { + float3 buf; + + buf.x = src[0] * (1.f / 255.f); + buf.y = src[1] * (1.f / 255.f); + buf.z = src[2] * (1.f / 255.f); + + RGB2HLSConvert(&buf.x, buf); + + dst.x = saturate_cast(buf.x); + dst.y = saturate_cast(buf.y*255.f); + dst.z = saturate_cast(buf.z*255.f); + } + + template static __device__ uint RGB2HLSConvert(uint src) + { + float3 buf; + + buf.x = (0xff & src) * (1.f / 255.f); + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + RGB2HLSConvert(&buf.x, buf); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct RGB2HLS + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HLSConvert(&src.x, dst); + + return dst; + } + __device__ __forceinline__ RGB2HLS() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ RGB2HLS(const RGB2HLS& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct RGB2HLS : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HLSConvert(src); + } + __device__ __forceinline__ RGB2HLS() : 
unary_function(){} + __device__ __forceinline__ RGB2HLS(const RGB2HLS& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ int c_HlsSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HLS2RGBConvert(const T& src, float* dst) + { + const float hscale = 6.0f / hr; + + float h = src.x, l = src.y, s = src.z; + float b = l, g = l, r = l; + + if (s != 0) + { + float p2 = (l <= 0.5f) * l * (1 + s); + p2 += (l > 0.5f) * (l + s - l * s); + float p1 = 2 * l - p2; + + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector; + sector = __float2int_rd(h); + + h -= sector; + + float tab[4]; + tab[0] = p2; + tab[1] = p1; + tab[2] = p1 + (p2 - p1) * (1 - h); + tab[3] = p1 + (p2 - p1) * h; + + b = tab[c_HlsSectorData[sector][0]]; + g = tab[c_HlsSectorData[sector][1]]; + r = tab[c_HlsSectorData[sector][2]]; + } + + dst[bidx] = b; + 
dst[1] = g; + dst[bidx^2] = r; + } + + template static __device__ void HLS2RGBConvert(const T& src, uchar* dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HLS2RGBConvert(buf, &buf.x); + + dst[0] = saturate_cast(buf.x * 255.f); + dst[1] = saturate_cast(buf.y * 255.f); + dst[2] = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HLS2RGBConvert(uint src) + { + float3 buf; + + buf.x = 0xff & src; + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + HLS2RGBConvert(buf, &buf.x); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct HLS2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HLS2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __device__ __forceinline__ HLS2RGB() + : unary_function::vec_type, typename TypeVec::vec_type>(){} + __device__ __forceinline__ HLS2RGB(const HLS2RGB& other_) + : unary_function::vec_type, typename TypeVec::vec_type>(){} + }; + + template struct HLS2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HLS2RGBConvert(src); + } + __device__ __forceinline__ HLS2RGB() : unary_function(){} + __device__ __forceinline__ HLS2RGB(const HLS2RGB& other_) : unary_function(){} + }; + } + +#define OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + 
typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + #undef CV_DESCALE +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__ diff --git a/OpenCV/Headers/gpu/device/detail/reduction_detail.hpp b/OpenCV/Headers/gpu/device/detail/reduction_detail.hpp new file mode 100644 index 0000000000..0274f204a2 --- /dev/null +++ b/OpenCV/Headers/gpu/device/detail/reduction_detail.hpp @@ -0,0 +1,841 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_REDUCTION_DETAIL_HPP__ +#define __OPENCV_GPU_REDUCTION_DETAIL_HPP__ + +namespace cv { namespace gpu { namespace device +{ + namespace utility_detail + { + /////////////////////////////////////////////////////////////////////////////// + // Reductor + + template struct WarpReductor + { + template static __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + if (tid < n) + data[tid] = partial_reduction; + if (n > 32) __syncthreads(); + + if (n > 32) + { + if (tid < n - 32) + data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]); + if (tid < 16) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + else if (n > 16) + { + if (tid < n - 16) + data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]); + if (tid < 8) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + else if (n > 8) + { + if (tid < n - 8) + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]); + if (tid < 4) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + else if (n > 4) + { + if (tid < n - 4) + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]); + if (tid < 2) + { + data[tid] = partial_reduction = 
op(partial_reduction, data[tid + 2]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + else if (n > 2) + { + if (tid < n - 2) + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]); + if (tid < 2) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + } + }; + template <> struct WarpReductor<64> + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + data[tid] = partial_reduction; + __syncthreads(); + + if (tid < 32) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); + } + } + }; + template <> struct WarpReductor<32> + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + data[tid] = partial_reduction; + + if (tid < 16) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); + } + } + }; + template <> struct WarpReductor<16> + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + data[tid] = partial_reduction; + + if (tid < 8) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]); + data[tid] = partial_reduction = 
op(partial_reduction, data[tid + 2 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); + } + } + }; + template <> struct WarpReductor<8> + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + data[tid] = partial_reduction; + + if (tid < 4) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); + } + } + }; + + template struct ReductionDispatcher; + template <> struct ReductionDispatcher + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + WarpReductor::reduce(data, partial_reduction, tid, op); + } + }; + template <> struct ReductionDispatcher + { + template static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + if (tid < n) + data[tid] = partial_reduction; + __syncthreads(); + + + if (n == 512) { if (tid < 256) { data[tid] = partial_reduction = op(partial_reduction, data[tid + 256]); } __syncthreads(); } + if (n >= 256) { if (tid < 128) { data[tid] = partial_reduction = op(partial_reduction, data[tid + 128]); } __syncthreads(); } + if (n >= 128) { if (tid < 64) { data[tid] = partial_reduction = op(partial_reduction, data[tid + 64]); } __syncthreads(); } + + if (tid < 32) + { + data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]); + data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]); + } + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // 
PredValWarpReductor + + template struct PredValWarpReductor; + template <> struct PredValWarpReductor<64> + { + template + static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + if (tid < 32) + { + myData = sdata[tid]; + myVal = sval[tid]; + + T reg = sdata[tid + 32]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 32]; + } + + reg = sdata[tid + 16]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 16]; + } + + reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 1]; + } + } + } + }; + template <> struct PredValWarpReductor<32> + { + template + static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + if (tid < 16) + { + myData = sdata[tid]; + myVal = sval[tid]; + + T reg = sdata[tid + 16]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 16]; + } + + reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 1]; + } + } + } + }; + + template <> struct PredValWarpReductor<16> + { + 
template + static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + if (tid < 8) + { + myData = sdata[tid]; + myVal = sval[tid]; + + T reg = reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 1]; + } + } + } + }; + template <> struct PredValWarpReductor<8> + { + template + static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + if (tid < 4) + { + myData = sdata[tid]; + myVal = sval[tid]; + + T reg = reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 1]; + } + } + } + }; + + template struct PredValReductionDispatcher; + template <> struct PredValReductionDispatcher + { + template static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + PredValWarpReductor::reduce(myData, myVal, sdata, sval, tid, pred); + } + }; + template <> struct PredValReductionDispatcher + { + template static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred) + { + myData = sdata[tid]; + myVal = sval[tid]; + + if (n >= 512 && tid < 256) + { + T reg = sdata[tid + 256]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = 
sval[tid + 256]; + } + __syncthreads(); + } + if (n >= 256 && tid < 128) + { + T reg = sdata[tid + 128]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 128]; + } + __syncthreads(); + } + if (n >= 128 && tid < 64) + { + T reg = sdata[tid + 64]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 64]; + } + __syncthreads(); + } + + if (tid < 32) + { + if (n >= 64) + { + T reg = sdata[tid + 32]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 32]; + } + } + if (n >= 32) + { + T reg = sdata[tid + 16]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 16]; + } + } + if (n >= 16) + { + T reg = sdata[tid + 8]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 8]; + } + } + if (n >= 8) + { + T reg = sdata[tid + 4]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 4]; + } + } + if (n >= 4) + { + T reg = sdata[tid + 2]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 2]; + } + } + if (n >= 2) + { + T reg = sdata[tid + 1]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval[tid] = myVal = sval[tid + 1]; + } + } + } + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // PredVal2WarpReductor + + template struct PredVal2WarpReductor; + template <> struct PredVal2WarpReductor<64> + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred) + { + if (tid < 32) + { + myData = sdata[tid]; + myVal1 = sval1[tid]; + myVal2 = sval2[tid]; + + T reg = sdata[tid + 32]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 32]; + sval2[tid] = myVal2 = sval2[tid + 32]; + } + + reg = sdata[tid + 16]; + if 
(pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 16]; + sval2[tid] = myVal2 = sval2[tid + 16]; + } + + reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 8]; + sval2[tid] = myVal2 = sval2[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 4]; + sval2[tid] = myVal2 = sval2[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 2]; + sval2[tid] = myVal2 = sval2[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 1]; + sval2[tid] = myVal2 = sval2[tid + 1]; + } + } + } + }; + template <> struct PredVal2WarpReductor<32> + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred) + { + if (tid < 16) + { + myData = sdata[tid]; + myVal1 = sval1[tid]; + myVal2 = sval2[tid]; + + T reg = sdata[tid + 16]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 16]; + sval2[tid] = myVal2 = sval2[tid + 16]; + } + + reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 8]; + sval2[tid] = myVal2 = sval2[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 4]; + sval2[tid] = myVal2 = sval2[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 2]; + sval2[tid] = myVal2 = sval2[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 1]; + sval2[tid] = myVal2 = sval2[tid + 1]; + } + } + } + }; + + template <> struct 
PredVal2WarpReductor<16> + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred) + { + if (tid < 8) + { + myData = sdata[tid]; + myVal1 = sval1[tid]; + myVal2 = sval2[tid]; + + T reg = reg = sdata[tid + 8]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 8]; + sval2[tid] = myVal2 = sval2[tid + 8]; + } + + reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 4]; + sval2[tid] = myVal2 = sval2[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 2]; + sval2[tid] = myVal2 = sval2[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 1]; + sval2[tid] = myVal2 = sval2[tid + 1]; + } + } + } + }; + template <> struct PredVal2WarpReductor<8> + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred) + { + if (tid < 4) + { + myData = sdata[tid]; + myVal1 = sval1[tid]; + myVal2 = sval2[tid]; + + T reg = reg = sdata[tid + 4]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 4]; + sval2[tid] = myVal2 = sval2[tid + 4]; + } + + reg = sdata[tid + 2]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 2]; + sval2[tid] = myVal2 = sval2[tid + 2]; + } + + reg = sdata[tid + 1]; + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 1]; + sval2[tid] = myVal2 = sval2[tid + 1]; + } + } + } + }; + + template struct PredVal2ReductionDispatcher; + template <> struct PredVal2ReductionDispatcher + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int 
tid, const Pred& pred) + { + PredVal2WarpReductor::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred); + } + }; + template <> struct PredVal2ReductionDispatcher + { + template + static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred) + { + myData = sdata[tid]; + myVal1 = sval1[tid]; + myVal2 = sval2[tid]; + + if (n >= 512 && tid < 256) + { + T reg = sdata[tid + 256]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 256]; + sval2[tid] = myVal2 = sval2[tid + 256]; + } + __syncthreads(); + } + if (n >= 256 && tid < 128) + { + T reg = sdata[tid + 128]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 128]; + sval2[tid] = myVal2 = sval2[tid + 128]; + } + __syncthreads(); + } + if (n >= 128 && tid < 64) + { + T reg = sdata[tid + 64]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 64]; + sval2[tid] = myVal2 = sval2[tid + 64]; + } + __syncthreads(); + } + + if (tid < 32) + { + if (n >= 64) + { + T reg = sdata[tid + 32]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 32]; + sval2[tid] = myVal2 = sval2[tid + 32]; + } + } + if (n >= 32) + { + T reg = sdata[tid + 16]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 16]; + sval2[tid] = myVal2 = sval2[tid + 16]; + } + } + if (n >= 16) + { + T reg = sdata[tid + 8]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 8]; + sval2[tid] = myVal2 = sval2[tid + 8]; + } + } + if (n >= 8) + { + T reg = sdata[tid + 4]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 4]; + sval2[tid] = myVal2 = sval2[tid + 4]; + } + } + if (n >= 4) + { + T reg = sdata[tid + 2]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + 
sval1[tid] = myVal1 = sval1[tid + 2]; + sval2[tid] = myVal2 = sval2[tid + 2]; + } + } + if (n >= 2) + { + T reg = sdata[tid + 1]; + + if (pred(reg, myData)) + { + sdata[tid] = myData = reg; + sval1[tid] = myVal1 = sval1[tid + 1]; + sval2[tid] = myVal2 = sval2[tid + 1]; + } + } + } + } + }; + } // namespace utility_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_REDUCTION_DETAIL_HPP__ diff --git a/OpenCV/Headers/gpu/device/detail/transform_detail.hpp b/OpenCV/Headers/gpu/device/detail/transform_detail.hpp new file mode 100644 index 0000000000..10da5938c5 --- /dev/null +++ b/OpenCV/Headers/gpu/device/detail/transform_detail.hpp @@ -0,0 +1,395 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ +#define __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../functional.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace transform_detail + { + //! Read Write Traits + + template struct UnaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type; + typedef typename TypeVec::vec_type write_type; + }; + + template struct BinaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type1; + typedef typename TypeVec::vec_type read_type2; + typedef typename TypeVec::vec_type write_type; + }; + + //! 
Transform kernels + + template struct OpUnroller; + template <> struct OpUnroller<1> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + } + }; + template <> struct OpUnroller<2> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + } + }; + template <> struct OpUnroller<3> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + } + }; + template <> struct OpUnroller<4> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = 
op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src.w); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src1.w, src2.w); + } + }; + template <> struct OpUnroller<8> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src.a3); + if (mask(y, x_shifted + 4)) + dst.a4 = op(src.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src.a7); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src1.a0, src2.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src1.a1, src2.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src1.a2, src2.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src1.a3, src2.a3); + if (mask(y, x_shifted + 4)) + dst.a4 = op(src1.a4, src2.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src1.a5, src2.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src1.a6, src2.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src1.a7, src2.a7); + } + }; + + template + static __global__ void transformSmart(const PtrStepSz src_, PtrStep dst_, const Mask mask, const UnOp op) + { + typedef TransformFunctorTraits ft; + typedef typename 
UnaryReadWriteTraits::read_type read_type; + typedef typename UnaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src_.rows) + { + const T* src = src_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src_.cols) + { + const read_type src_n_el = ((const read_type*)src)[x]; + write_type dst_n_el = ((const write_type*)dst)[x]; + + OpUnroller::unroll(src_n_el, dst_n_el, mask, op, x_shifted, y); + + ((write_type*)dst)[x] = dst_n_el; + } + else + { + for (int real_x = x_shifted; real_x < src_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src[real_x]); + } + } + } + } + + template + __global__ static void transformSimple(const PtrStepSz src, PtrStep dst, const Mask mask, const UnOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src.cols && y < src.rows && mask(y, x)) + { + dst.ptr(y)[x] = op(src.ptr(y)[x]); + } + } + + template + static __global__ void transformSmart(const PtrStepSz src1_, const PtrStep src2_, PtrStep dst_, + const Mask mask, const BinOp op) + { + typedef TransformFunctorTraits ft; + typedef typename BinaryReadWriteTraits::read_type1 read_type1; + typedef typename BinaryReadWriteTraits::read_type2 read_type2; + typedef typename BinaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src1_.rows) + { + const T1* src1 = src1_.ptr(y); + const T2* src2 = src2_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src1_.cols) + { + const read_type1 src1_n_el = ((const read_type1*)src1)[x]; + const read_type2 src2_n_el = ((const read_type2*)src2)[x]; + write_type dst_n_el = ((const write_type*)dst)[x]; + + 
OpUnroller::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y); + + ((write_type*)dst)[x] = dst_n_el; + } + else + { + for (int real_x = x_shifted; real_x < src1_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src1[real_x], src2[real_x]); + } + } + } + } + + template + static __global__ void transformSimple(const PtrStepSz src1, const PtrStep src2, PtrStep dst, + const Mask mask, const BinOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src1.cols && y < src1.rows && mask(y, x)) + { + const T1 src1_data = src1.ptr(y)[x]; + const T2 src2_data = src2.ptr(y)[x]; + dst.ptr(y)[x] = op(src1_data, src2_data); + } + } + + template struct TransformDispatcher; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1); + + transformSimple<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1); + + transformSimple<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + StaticAssert::check(); + + if (!isAligned(src.data, ft::smart_shift * 
sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1); + + transformSmart<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + StaticAssert::check(); + + if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) || + !isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src1, src2, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1); + + transformSmart<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + } // namespace transform_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ diff --git a/OpenCV/Headers/gpu/device/detail/type_traits_detail.hpp b/OpenCV/Headers/gpu/device/detail/type_traits_detail.hpp new file mode 100644 index 0000000000..97ff00d8f6 --- /dev/null +++ b/OpenCV/Headers/gpu/device/detail/type_traits_detail.hpp @@ -0,0 +1,187 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ +#define __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace type_traits_detail + { + template struct Select { typedef T1 type; }; + template struct Select { typedef T2 type; }; + + template struct IsSignedIntergral { enum {value = 0}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + + template struct IsUnsignedIntegral { enum {value = 0}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + + template struct IsIntegral { enum {value = IsSignedIntergral::value || IsUnsignedIntegral::value}; }; + template <> struct IsIntegral { enum {value = 1}; }; + template <> struct IsIntegral { enum {value = 1}; }; + + template 
struct IsFloat { enum {value = 0}; }; + template <> struct IsFloat { enum {value = 1}; }; + template <> struct IsFloat { enum {value = 1}; }; + + template struct IsVec { enum {value = 0}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + 
template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + + template struct AddParameterType { typedef const U& type; }; + template struct AddParameterType { typedef U& type; }; + template <> struct AddParameterType { typedef void type; }; + + template struct ReferenceTraits + { + enum { value = false }; + typedef U type; + }; + template struct ReferenceTraits + { + enum { value = true }; + typedef U type; + }; + + template struct PointerTraits + { + enum { value = false }; + typedef void type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + + template struct UnConst + { + typedef U type; + enum { value = 0 }; + }; + template struct UnConst + { + typedef U type; + enum { value = 1 }; + }; + template struct UnConst + { + typedef U& type; + enum { value = 1 }; + }; + + template struct UnVolatile + { + typedef U type; + enum { value = 0 }; + }; + template struct UnVolatile + { + typedef U type; + enum { value = 1 }; + }; + template struct UnVolatile + { + typedef U& type; + enum { value = 1 }; + }; + } // namespace type_traits_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ diff --git a/OpenCV/Headers/gpu/device/detail/vec_distance_detail.hpp b/OpenCV/Headers/gpu/device/detail/vec_distance_detail.hpp new file mode 100644 index 0000000000..78ab5565cb --- /dev/null +++ b/OpenCV/Headers/gpu/device/detail/vec_distance_detail.hpp @@ -0,0 +1,117 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ +#define __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ + +#include "../datamov_utils.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace vec_distance_detail + { + template struct UnrollVecDiffCached + { + template + static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind) + { + if (ind < len) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, ind, val2); + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM); + } + } + + template + static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, 0, val2); + vecGlob += THREAD_DIM; + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob, dist); + } + }; + template struct UnrollVecDiffCached + { + template + static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int) + { + } + + template + static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&) + { + } + }; + + template struct VecDiffCachedCalculator; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, tid); + } + }; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob + tid, dist); + } + }; + } // namespace vec_distance_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ diff --git a/OpenCV/Headers/gpu/device/dynamic_smem.hpp 
b/OpenCV/Headers/gpu/device/dynamic_smem.hpp new file mode 100644 index 0000000000..cf431d9524 --- /dev/null +++ b/OpenCV/Headers/gpu/device/dynamic_smem.hpp @@ -0,0 +1,80 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__ +#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__ + +namespace cv { namespace gpu { namespace device +{ + template struct DynamicSharedMem + { + __device__ __forceinline__ operator T*() + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + + __device__ __forceinline__ operator const T*() const + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + }; + + // specialize for double to avoid unaligned memory access compile errors + template<> struct DynamicSharedMem + { + __device__ __forceinline__ operator double*() + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + + __device__ __forceinline__ operator const double*() const + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + }; +}}} + +#endif // __OPENCV_GPU_DYNAMIC_SMEM_HPP__ diff --git a/OpenCV/Headers/gpu/device/emulation.hpp b/OpenCV/Headers/gpu/device/emulation.hpp new file mode 100644 index 0000000000..074e911275 --- /dev/null +++ b/OpenCV/Headers/gpu/device/emulation.hpp @@ -0,0 +1,139 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_GPU_EMULATION_HPP_ +#define OPENCV_GPU_EMULATION_HPP_ + +#include "warp_reduce.hpp" +#include + +namespace cv { namespace gpu { namespace device +{ + struct Emulation + { + + static __device__ __forceinline__ int syncthreadsOr(int pred) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) + // just campilation stab + return 0; +#else + return __syncthreads_or(pred); +#endif + } + + template + static __forceinline__ __device__ int Ballot(int predicate) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200) + return __ballot(predicate); +#else + __shared__ volatile int cta_buffer[CTA_SIZE]; + + int tid = threadIdx.x; + cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0; + return warp_reduce(cta_buffer); +#endif + } + + struct smem + { + enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U }; + + template + static __device__ __forceinline__ T atomicInc(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + 1); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - 1; +#else + return ::atomicInc(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicAdd(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + val); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - val; +#else + return ::atomicAdd(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicMin(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count = ::min(*address, val); + do + { + *address = count; + } while (*address > count); + + return count; +#else + return 
::atomicMin(address, val); +#endif + } + }; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif /* OPENCV_GPU_EMULATION_HPP_ */ diff --git a/OpenCV/Headers/gpu/device/filters.hpp b/OpenCV/Headers/gpu/device/filters.hpp new file mode 100644 index 0000000000..d193969a79 --- /dev/null +++ b/OpenCV/Headers/gpu/device/filters.hpp @@ -0,0 +1,278 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_FILTERS_HPP__ +#define __OPENCV_GPU_FILTERS_HPP__ + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" +#include "type_traits.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct PointFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + return src(__float2int_rz(y), __float2int_rz(x)); + } + + const Ptr2D src; + }; + + template struct LinearFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + typedef typename TypeVec::cn>::vec_type work_type; + + work_type out = VecTraits::all(0); + + const int x1 = __float2int_rd(x); + const int y1 = __float2int_rd(y); + const int x2 = x1 + 1; + const int y2 = y1 + 1; + + elem_type src_reg = src(y1, x1); + out = out + src_reg * ((x2 - x) * (y2 - y)); + + src_reg = src(y1, x2); + out = out + src_reg * ((x - x1) * (y2 - y)); + + src_reg = src(y2, x1); + out = out + src_reg * 
((x2 - x) * (y - y1)); + + src_reg = src(y2, x2); + out = out + src_reg * ((x - x1) * (y - y1)); + + return saturate_cast(out); + } + + const Ptr2D src; + }; + + template struct CubicFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + typedef typename TypeVec::cn>::vec_type work_type; + + explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + + static __device__ __forceinline__ float bicubicCoeff(float x_) + { + float x = fabsf(x_); + if (x <= 1.0f) + { + return x * x * (1.5f * x - 2.5f) + 1.0f; + } + else if (x < 2.0f) + { + return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; + } + else + { + return 0.0f; + } + } + + __device__ elem_type operator ()(float y, float x) const + { + const float xmin = ::ceilf(x - 2.0f); + const float xmax = ::floorf(x + 2.0f); + + const float ymin = ::ceilf(y - 2.0f); + const float ymax = ::floorf(y + 2.0f); + + work_type sum = VecTraits::all(0); + float wsum = 0.0f; + + for (float cy = ymin; cy <= ymax; cy += 1.0f) + { + for (float cx = xmin; cx <= xmax; cx += 1.0f) + { + const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy); + sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx)); + wsum += w; + } + } + + work_type res = (!wsum)? 
VecTraits::all(0) : sum / wsum; + + return saturate_cast(res); + } + + const Ptr2D src; + }; + // for integer scaling + template struct IntegerAreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for(int dy = sy1; dy < sy2; ++dy) + for(int dx = sx1; dx < sx2; ++dx) + { + out = out + src(dy, dx) * scale; + } + + return saturate_cast(out); + } + + const Ptr2D src; + float scale_x, scale_y ,scale; + }; + + template struct AreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_){} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1)); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for (int dy = sy1; dy < sy2; ++dy) + { + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(dy, dx) * 
scale; + + if (sx1 > fsx1) + out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale); + + if (sx2 < fsx2) + out = out + src(dy, sx2) * ((fsx2 -sx2) * scale); + } + + if (sy1 > fsy1) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale); + + if (sy2 < fsy2) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(sy2, dx) * ((fsy2 -sy2) * scale); + + if ((sy1 > fsy1) && (sx1 > fsx1)) + out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale); + + if ((sy1 > fsy1) && (sx2 < fsx2)) + out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx2 < fsx2)) + out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx1 > fsx1)) + out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale); + + return saturate_cast(out); + } + + const Ptr2D src; + float scale_x, scale_y; + int width, haight; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_FILTERS_HPP__ diff --git a/OpenCV/Headers/gpu/device/funcattrib.hpp b/OpenCV/Headers/gpu/device/funcattrib.hpp new file mode 100644 index 0000000000..05df4d10b4 --- /dev/null +++ b/OpenCV/Headers/gpu/device/funcattrib.hpp @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ +#define __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ + +#include + +namespace cv { namespace gpu { namespace device +{ + template + void printFuncAttrib(Func& func) + { + + cudaFuncAttributes attrs; + cudaFuncGetAttributes(&attrs, func); + + printf("=== Function stats ===\n"); + printf("Name: \n"); + printf("sharedSizeBytes = %d\n", attrs.sharedSizeBytes); + printf("constSizeBytes = %d\n", attrs.constSizeBytes); + printf("localSizeBytes = %d\n", attrs.localSizeBytes); + printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock); + printf("numRegs = %d\n", attrs.numRegs); + printf("ptxVersion = %d\n", attrs.ptxVersion); + printf("binaryVersion = %d\n", attrs.binaryVersion); + printf("\n"); + fflush(stdout); + } +}}} // namespace cv { namespace gpu { namespace device + +#endif /* __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ */ \ No newline at end of file diff --git a/OpenCV/Headers/gpu/device/functional.hpp b/OpenCV/Headers/gpu/device/functional.hpp new file mode 100644 index 0000000000..c601cf5273 --- /dev/null +++ b/OpenCV/Headers/gpu/device/functional.hpp @@ -0,0 +1,686 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_FUNCTIONAL_HPP__ +#define __OPENCV_GPU_FUNCTIONAL_HPP__ + +#include +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "type_traits.hpp" +#include "device_functions.h" + +namespace cv { namespace gpu { namespace device +{ + // Function Objects + template struct unary_function : public std::unary_function {}; + template struct binary_function : public std::binary_function {}; + + // Arithmetic Operations + template struct plus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a + b; + } + __device__ __forceinline__ plus(const plus& other):binary_function(){} + __device__ __forceinline__ plus():binary_function(){} + }; + + template struct minus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a - b; + } + __device__ __forceinline__ minus(const minus& other):binary_function(){} + __device__ __forceinline__ minus():binary_function(){} + }; + + template struct multiplies : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a * b; + } + __device__ __forceinline__ multiplies(const multiplies& other):binary_function(){} + __device__ __forceinline__ multiplies():binary_function(){} + }; + + template struct divides : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a / b; + } + __device__ __forceinline__ divides(const divides& other):binary_function(){} + __device__ __forceinline__ divides():binary_function(){} + }; + + template struct modulus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) 
const + { + return a % b; + } + __device__ __forceinline__ modulus(const modulus& other):binary_function(){} + __device__ __forceinline__ modulus():binary_function(){} + }; + + template struct negate : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a) const + { + return -a; + } + __device__ __forceinline__ negate(const negate& other):unary_function(){} + __device__ __forceinline__ negate():unary_function(){} + }; + + // Comparison Operations + template struct equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a == b; + } + __device__ __forceinline__ equal_to(const equal_to& other):binary_function(){} + __device__ __forceinline__ equal_to():binary_function(){} + }; + + template struct not_equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a != b; + } + __device__ __forceinline__ not_equal_to(const not_equal_to& other):binary_function(){} + __device__ __forceinline__ not_equal_to():binary_function(){} + }; + + template struct greater : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a > b; + } + __device__ __forceinline__ greater(const greater& other):binary_function(){} + __device__ __forceinline__ greater():binary_function(){} + }; + + template struct less : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a < b; + } + __device__ __forceinline__ less(const less& other):binary_function(){} + __device__ __forceinline__ less():binary_function(){} + }; + + template struct greater_equal : binary_function + { + __device__ __forceinline__ bool operator 
()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a >= b; + } + __device__ __forceinline__ greater_equal(const greater_equal& other):binary_function(){} + __device__ __forceinline__ greater_equal():binary_function(){} + }; + + template struct less_equal : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a <= b; + } + __device__ __forceinline__ less_equal(const less_equal& other):binary_function(){} + __device__ __forceinline__ less_equal():binary_function(){} + }; + + // Logical Operations + template struct logical_and : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a && b; + } + __device__ __forceinline__ logical_and(const logical_and& other):binary_function(){} + __device__ __forceinline__ logical_and():binary_function(){} + }; + + template struct logical_or : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a || b; + } + __device__ __forceinline__ logical_or(const logical_or& other):binary_function(){} + __device__ __forceinline__ logical_or():binary_function(){} + }; + + template struct logical_not : unary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a) const + { + return !a; + } + __device__ __forceinline__ logical_not(const logical_not& other):unary_function(){} + __device__ __forceinline__ logical_not():unary_function(){} + }; + + // Bitwise Operations + template struct bit_and : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a & b; + } + __device__ __forceinline__ bit_and(const bit_and& 
other):binary_function(){} + __device__ __forceinline__ bit_and():binary_function(){} + }; + + template struct bit_or : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a | b; + } + __device__ __forceinline__ bit_or(const bit_or& other):binary_function(){} + __device__ __forceinline__ bit_or():binary_function(){} + }; + + template struct bit_xor : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a ^ b; + } + __device__ __forceinline__ bit_xor(const bit_xor& other):binary_function(){} + __device__ __forceinline__ bit_xor():binary_function(){} + }; + + template struct bit_not : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType v) const + { + return ~v; + } + __device__ __forceinline__ bit_not(const bit_not& other):unary_function(){} + __device__ __forceinline__ bit_not():unary_function(){} + }; + + // Generalized Identity Operations + template struct identity : unary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType x) const + { + return x; + } + __device__ __forceinline__ identity(const identity& other):unary_function(){} + __device__ __forceinline__ identity():unary_function(){} + }; + + template struct project1st : binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return lhs; + } + __device__ __forceinline__ project1st(const project1st& other):binary_function(){} + __device__ __forceinline__ project1st():binary_function(){} + }; + + template struct project2nd : binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType lhs, 
typename TypeTraits::ParameterType rhs) const + { + return rhs; + } + __device__ __forceinline__ project2nd(const project2nd& other):binary_function(){} + __device__ __forceinline__ project2nd():binary_function(){} + }; + + // Min/Max Operations + +#define OPENCV_GPU_IMPLEMENT_MINMAX(name, type, op) \ + template <> struct name : binary_function \ + { \ + __device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \ + __device__ __forceinline__ name(const name& other):binary_function(){}\ + __device__ __forceinline__ name():binary_function(){}\ + }; + + template struct maximum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return lhs < rhs ? rhs : lhs; + } + __device__ __forceinline__ maximum(const maximum& other):binary_function(){} + __device__ __forceinline__ maximum():binary_function(){} + }; + + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, schar, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, char, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, ushort, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, short, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, int, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uint, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, float, ::fmax) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, double, ::fmax) + + template struct minimum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return lhs < rhs ? 
lhs : rhs; + } + __device__ __forceinline__ minimum(const minimum& other):binary_function(){} + __device__ __forceinline__ minimum():binary_function(){} + }; + + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, schar, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, char, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, ushort, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, short, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, int, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uint, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, float, ::fmin) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, double, ::fmin) + +#undef OPENCV_GPU_IMPLEMENT_MINMAX + + // Math functions +///bound========================================= +#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \ + template struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v) const \ + { \ + return func ## f(v); \ + } \ + }; \ + template <> struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ double operator ()(double v) const \ + { \ + return func(v); \ + } \ + }; + +#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \ + template struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v1, typename TypeTraits::ParameterType v2) const \ + { \ + return func ## f(v1, v2); \ + } \ + }; \ + template <> struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ double operator ()(double v1, double v2) const \ + { \ + return func(v1, v2); \ + } \ + }; + + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(fabs, ::fabs) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp, ::exp) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log, ::log) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log2, ::log2) + 
OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log10, ::log10) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sin, ::sin) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cos, ::cos) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tan, ::tan) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asin, ::asin) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acos, ::acos) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atan, ::atan) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh) + + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot) + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2) + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(pow, ::pow) + + #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR + #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE + #undef OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR + + template struct hypot_sqr_func : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType src1, typename TypeTraits::ParameterType src2) const + { + return src1 * src1 + src2 * src2; + } + __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func& other) : binary_function(){} + __device__ __forceinline__ hypot_sqr_func() : binary_function(){} + }; + + // Saturate Cast Functor + template struct saturate_cast_func : unary_function + { + __device__ __forceinline__ D operator ()(typename TypeTraits::ParameterType v) const + { + return saturate_cast(v); + } + __device__ __forceinline__ saturate_cast_func(const saturate_cast_func& other):unary_function(){} + __device__ __forceinline__ saturate_cast_func():unary_function(){} + }; + + // Threshold Functors + template struct thresh_binary_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return 
(src > thresh) * maxVal; + } + + __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other) + : unary_function(), thresh(other.thresh), maxVal(other.maxVal){} + + __device__ __forceinline__ thresh_binary_func():unary_function(){} + + const T thresh; + const T maxVal; + }; + + template struct thresh_binary_inv_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * maxVal; + } + + __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other) + : unary_function(), thresh(other.thresh), maxVal(other.maxVal){} + + __device__ __forceinline__ thresh_binary_inv_func():unary_function(){} + + const T thresh; + const T maxVal; + }; + + template struct thresh_trunc_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return minimum()(src, thresh); + } + + __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other) + : unary_function(), thresh(other.thresh){} + + __device__ __forceinline__ thresh_trunc_func():unary_function(){} + + const T thresh; + }; + + template struct thresh_to_zero_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src > thresh) * src; + } + __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other) + : unary_function(), thresh(other.thresh){} + + __device__ __forceinline__ thresh_to_zero_func():unary_function(){} + + const T thresh; + }; + + template struct 
thresh_to_zero_inv_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * src; + } + __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other) + : unary_function(), thresh(other.thresh){} + + __device__ __forceinline__ thresh_to_zero_inv_func():unary_function(){} + + const T thresh; + }; +//bound!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ============> + // Function Object Adaptors + template struct unary_negate : unary_function + { + explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x) const + { + return !pred(x); + } + + __device__ __forceinline__ unary_negate(const unary_negate& other) : unary_function(){} + __device__ __forceinline__ unary_negate() : unary_function(){} + + const Predicate pred; + }; + + template __host__ __device__ __forceinline__ unary_negate not1(const Predicate& pred) + { + return unary_negate(pred); + } + + template struct binary_negate : binary_function + { + explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x, + typename TypeTraits::ParameterType y) const + { + return !pred(x,y); + } + __device__ __forceinline__ binary_negate(const binary_negate& other) + : binary_function(){} + + __device__ __forceinline__ binary_negate() : + binary_function(){} + + const Predicate pred; + }; + + template __host__ __device__ __forceinline__ binary_negate not2(const BinaryPredicate& pred) + { + return binary_negate(pred); + } + + template struct binder1st : unary_function + { + __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename 
Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {} + + __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(arg1, a); + } + + __device__ __forceinline__ binder1st(const binder1st& other) : + unary_function(){} + + const Op op; + const typename Op::first_argument_type arg1; + }; + + template __host__ __device__ __forceinline__ binder1st bind1st(const Op& op, const T& x) + { + return binder1st(op, typename Op::first_argument_type(x)); + } + + template struct binder2nd : unary_function + { + __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {} + + __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(a, arg2); + } + + __device__ __forceinline__ binder2nd(const binder2nd& other) : + unary_function(), op(other.op), arg2(other.arg2){} + + const Op op; + const typename Op::second_argument_type arg2; + }; + + template __host__ __device__ __forceinline__ binder2nd bind2nd(const Op& op, const T& x) + { + return binder2nd(op, typename Op::second_argument_type(x)); + } + + // Functor Traits + template struct IsUnaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(unary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + template struct IsBinaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(binary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + namespace functional_detail + { + template struct UnOpShift { enum { shift = 1 }; }; + template struct UnOpShift { enum { shift = 4 }; }; + template struct UnOpShift { enum { shift = 2 }; }; + + template struct DefaultUnaryShift + { + enum { shift = UnOpShift::shift }; + }; + + 
template struct BinOpShift { enum { shift = 1 }; }; + template struct BinOpShift { enum { shift = 4 }; }; + template struct BinOpShift { enum { shift = 2 }; }; + + template struct DefaultBinaryShift + { + enum { shift = BinOpShift::shift }; + }; + + template ::value> struct ShiftDispatcher; + template struct ShiftDispatcher + { + enum { shift = DefaultUnaryShift::shift }; + }; + template struct ShiftDispatcher + { + enum { shift = DefaultBinaryShift::shift }; + }; + } + + template struct DefaultTransformShift + { + enum { shift = functional_detail::ShiftDispatcher::shift }; + }; + + template struct DefaultTransformFunctorTraits + { + enum { simple_block_dim_x = 16 }; + enum { simple_block_dim_y = 16 }; + + enum { smart_block_dim_x = 16 }; + enum { smart_block_dim_y = 16 }; + enum { smart_shift = DefaultTransformShift::shift }; + }; + + template struct TransformFunctorTraits : DefaultTransformFunctorTraits {}; + +#define OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(type) \ + template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type > +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_FUNCTIONAL_HPP__ diff --git a/OpenCV/Headers/gpu/device/limits.hpp b/OpenCV/Headers/gpu/device/limits.hpp new file mode 100644 index 0000000000..b040f199d6 --- /dev/null +++ b/OpenCV/Headers/gpu/device/limits.hpp @@ -0,0 +1,235 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_LIMITS_GPU_HPP__ +#define __OPENCV_GPU_LIMITS_GPU_HPP__ + +#include +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct numeric_limits + { + typedef T type; + __device__ __forceinline__ static type min() { return type(); }; + __device__ __forceinline__ static type max() { return type(); }; + __device__ __forceinline__ static type epsilon() { return type(); } + __device__ __forceinline__ static type round_error() { return type(); } + __device__ __forceinline__ static type denorm_min() { return type(); } + __device__ __forceinline__ static type infinity() { return type(); } + __device__ __forceinline__ static type quiet_NaN() { return type(); } + __device__ __forceinline__ static type signaling_NaN() { return T(); } + static const bool is_signed; + }; + + template<> struct numeric_limits + { + typedef bool type; + __device__ __forceinline__ static type min() { return false; }; + __device__ __forceinline__ static type max() { return true; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = false; + }; + + template<> struct numeric_limits + { + typedef char type; + __device__ __forceinline__ static type min() { return CHAR_MIN; }; + __device__ __forceinline__ static type max() { return CHAR_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = (char)-1 == -1; + }; + + template<> 
struct numeric_limits + { + typedef char type; + __device__ __forceinline__ static type min() { return SCHAR_MIN; }; + __device__ __forceinline__ static type max() { return SCHAR_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = (signed char)-1 == -1; + }; + + template<> struct numeric_limits + { + typedef unsigned char type; + __device__ __forceinline__ static type min() { return 0; }; + __device__ __forceinline__ static type max() { return UCHAR_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = false; + }; + + template<> struct numeric_limits + { + typedef short type; + __device__ __forceinline__ static type min() { return SHRT_MIN; }; + __device__ __forceinline__ static type max() { return SHRT_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = true; + }; + + template<> struct numeric_limits + { + typedef unsigned short type; + __device__ __forceinline__ static type min() { return 0; }; + __device__ __forceinline__ static type max() { return USHRT_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ 
__forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = false; + }; + + template<> struct numeric_limits + { + typedef int type; + __device__ __forceinline__ static type min() { return INT_MIN; }; + __device__ __forceinline__ static type max() { return INT_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = true; + }; + + + template<> struct numeric_limits + { + typedef unsigned int type; + __device__ __forceinline__ static type min() { return 0; }; + __device__ __forceinline__ static type max() { return UINT_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = false; + }; + + template<> struct numeric_limits + { + typedef long type; + __device__ __forceinline__ static type min() { return LONG_MIN; }; + __device__ __forceinline__ static type max() { return LONG_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool 
is_signed = true; + }; + + template<> struct numeric_limits + { + typedef unsigned long type; + __device__ __forceinline__ static type min() { return 0; }; + __device__ __forceinline__ static type max() { return ULONG_MAX; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = false; + }; + + template<> struct numeric_limits + { + typedef float type; + __device__ __forceinline__ static type min() { return 1.175494351e-38f/*FLT_MIN*/; }; + __device__ __forceinline__ static type max() { return 3.402823466e+38f/*FLT_MAX*/; }; + __device__ __forceinline__ static type epsilon() { return 1.192092896e-07f/*FLT_EPSILON*/; }; + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = true; + }; + + template<> struct numeric_limits + { + typedef double type; + __device__ __forceinline__ static type min() { return 2.2250738585072014e-308/*DBL_MIN*/; }; + __device__ __forceinline__ static type max() { return 1.7976931348623158e+308/*DBL_MAX*/; }; + __device__ __forceinline__ static type epsilon(); + __device__ __forceinline__ static type round_error(); + __device__ __forceinline__ static type denorm_min(); + __device__ __forceinline__ static type infinity(); + __device__ __forceinline__ static type quiet_NaN(); + __device__ __forceinline__ static type signaling_NaN(); + static const bool is_signed = true; + }; +}}} // namespace cv { namespace gpu { namespace device { + +#endif // __OPENCV_GPU_LIMITS_GPU_HPP__ diff --git 
a/OpenCV/Headers/gpu/device/saturate_cast.hpp b/OpenCV/Headers/gpu/device/saturate_cast.hpp new file mode 100644 index 0000000000..7bb1da751f --- /dev/null +++ b/OpenCV/Headers/gpu/device/saturate_cast.hpp @@ -0,0 +1,216 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__ +#define __OPENCV_GPU_SATURATE_CAST_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + template __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); } + + template<> __device__ __forceinline__ uchar saturate_cast(schar v) + { + return (uchar) ::max((int)v, 0); + } + template<> __device__ __forceinline__ uchar saturate_cast(ushort v) + { + return (uchar) ::min((uint)v, (uint)UCHAR_MAX); + } + template<> __device__ __forceinline__ uchar saturate_cast(int v) + { + return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? 
UCHAR_MAX : 0); + } + template<> __device__ __forceinline__ uchar saturate_cast(uint v) + { + return (uchar) ::min(v, (uint)UCHAR_MAX); + } + template<> __device__ __forceinline__ uchar saturate_cast(short v) + { + return saturate_cast((uint)v); + } + + template<> __device__ __forceinline__ uchar saturate_cast(float v) + { + int iv = __float2int_rn(v); + return saturate_cast(iv); + } + template<> __device__ __forceinline__ uchar saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + int iv = __double2int_rn(v); + return saturate_cast(iv); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ schar saturate_cast(uchar v) + { + return (schar) ::min((int)v, SCHAR_MAX); + } + template<> __device__ __forceinline__ schar saturate_cast(ushort v) + { + return (schar) ::min((uint)v, (uint)SCHAR_MAX); + } + template<> __device__ __forceinline__ schar saturate_cast(int v) + { + return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ? v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); + } + template<> __device__ __forceinline__ schar saturate_cast(short v) + { + return saturate_cast((int)v); + } + template<> __device__ __forceinline__ schar saturate_cast(uint v) + { + return (schar) ::min(v, (uint)SCHAR_MAX); + } + + template<> __device__ __forceinline__ schar saturate_cast(float v) + { + int iv = __float2int_rn(v); + return saturate_cast(iv); + } + template<> __device__ __forceinline__ schar saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + int iv = __double2int_rn(v); + return saturate_cast(iv); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ ushort saturate_cast(schar v) + { + return (ushort) ::max((int)v, 0); + } + template<> __device__ __forceinline__ ushort saturate_cast(short v) + { + return (ushort) ::max((int)v, 0); + } + template<> __device__ __forceinline__ ushort saturate_cast(int v) + { + return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); + } + template<> __device__ __forceinline__ ushort saturate_cast(uint v) + { + return (ushort) ::min(v, (uint)USHRT_MAX); + } + template<> __device__ __forceinline__ ushort saturate_cast(float v) + { + int iv = __float2int_rn(v); + return saturate_cast(iv); + } + template<> __device__ __forceinline__ ushort saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + int iv = __double2int_rn(v); + return saturate_cast(iv); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ short saturate_cast(ushort v) + { + return (short) ::min((int)v, SHRT_MAX); + } + template<> __device__ __forceinline__ short saturate_cast(int v) + { + return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ? v : v > 0 ? 
SHRT_MAX : SHRT_MIN); + } + template<> __device__ __forceinline__ short saturate_cast(uint v) + { + return (short) ::min(v, (uint)SHRT_MAX); + } + template<> __device__ __forceinline__ short saturate_cast(float v) + { + int iv = __float2int_rn(v); + return saturate_cast(iv); + } + template<> __device__ __forceinline__ short saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + int iv = __double2int_rn(v); + return saturate_cast(iv); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ int saturate_cast(float v) + { + return __float2int_rn(v); + } + template<> __device__ __forceinline__ int saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2int_rn(v); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ uint saturate_cast(float v) + { + return __float2uint_rn(v); + } + template<> __device__ __forceinline__ uint saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2uint_rn(v); + #else + return saturate_cast((float)v); + #endif + } +}}} + +#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */ diff --git a/OpenCV/Headers/gpu/device/scan.hpp b/OpenCV/Headers/gpu/device/scan.hpp new file mode 100644 index 0000000000..f6dc6937fb --- /dev/null +++ b/OpenCV/Headers/gpu/device/scan.hpp @@ -0,0 +1,171 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_SCAN_HPP__ +#define __OPENCV_GPU_SCAN_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 }; + + template struct WarpScan + { + __device__ __forceinline__ WarpScan() {} + __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = idx & 31; + F op; + + if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return tid; + } + + __device__ __forceinline__ void init(volatile T *ptr){} + + static const int warp_offset = 0; + + typedef WarpScan merge; + }; + + template struct WarpScanNoComp + { + __device__ __forceinline__ WarpScanNoComp() {} + __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = threadIdx.x & 31; + F op; + + ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? 
ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask); + } + + __device__ __forceinline__ void init(volatile T *ptr) + { + ptr[threadIdx.x] = 0; + } + + static const int warp_smem_stride = 32 + 16 + 1; + static const int warp_offset = 16; + static const int warp_log = 5; + static const int warp_mask = 31; + + typedef WarpScanNoComp merge; + }; + + template struct BlockScan + { + __device__ __forceinline__ BlockScan() {} + __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; } + + __device__ __forceinline__ T operator()(volatile T *ptr) + { + const unsigned int tid = threadIdx.x; + const unsigned int lane = tid & warp_mask; + const unsigned int warp = tid >> warp_log; + + Sc scan; + typename Sc::merge merge_scan; + const unsigned int idx = scan.index(tid); + + T val = scan(ptr, idx); + __syncthreads (); + + if( warp == 0) + scan.init(ptr); + __syncthreads (); + + if( lane == 31 ) + ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx]; + __syncthreads (); + + if( warp == 0 ) + merge_scan(ptr, idx); + __syncthreads(); + + if ( warp > 0) + val = ptr [scan.warp_offset + warp - 1] + val; + __syncthreads (); + + ptr[idx] = val; + __syncthreads (); + + return val ; + } + + static const int warp_log = 5; + static const int warp_mask = 31; + }; +}}} + +#endif // __OPENCV_GPU_SCAN_HPP__ diff --git a/OpenCV/Headers/gpu/device/static_check.hpp b/OpenCV/Headers/gpu/device/static_check.hpp new file mode 100644 index 0000000000..e77691b7b2 --- /dev/null +++ b/OpenCV/Headers/gpu/device/static_check.hpp @@ -0,0 +1,67 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ +#define __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ + +#if defined(__CUDACC__) + #define __OPENCV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __OPENCV_GPU_HOST_DEVICE__ +#endif + +namespace cv { namespace gpu +{ + namespace device + { + template struct Static {}; + + template<> struct Static + { + __OPENCV_GPU_HOST_DEVICE__ static void check() {}; + }; + } +}} + +#undef __OPENCV_GPU_HOST_DEVICE__ + +#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */ diff --git a/OpenCV/Headers/gpu/device/transform.hpp b/OpenCV/Headers/gpu/device/transform.hpp new file mode 100644 index 0000000000..636caac63f --- /dev/null +++ b/OpenCV/Headers/gpu/device/transform.hpp @@ -0,0 +1,67 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TRANSFORM_HPP__ +#define __OPENCV_GPU_TRANSFORM_HPP__ + +#include "common.hpp" +#include "utility.hpp" +#include "detail/transform_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template + static inline void transform(PtrStepSz src, PtrStepSz dst, UnOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream); + } + + template + static inline void transform(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream); + } +}}} + +#endif // __OPENCV_GPU_TRANSFORM_HPP__ diff --git a/OpenCV/Headers/gpu/device/type_traits.hpp b/OpenCV/Headers/gpu/device/type_traits.hpp new file mode 
100644 index 0000000000..1b36acca5d --- /dev/null +++ b/OpenCV/Headers/gpu/device/type_traits.hpp @@ -0,0 +1,82 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TYPE_TRAITS_HPP__ +#define __OPENCV_GPU_TYPE_TRAITS_HPP__ + +#include "detail/type_traits_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct IsSimpleParameter + { + enum {value = type_traits_detail::IsIntegral::value || type_traits_detail::IsFloat::value || + type_traits_detail::PointerTraits::type>::value}; + }; + + template struct TypeTraits + { + typedef typename type_traits_detail::UnConst::type NonConstType; + typedef typename type_traits_detail::UnVolatile::type NonVolatileType; + typedef typename type_traits_detail::UnVolatile::type>::type UnqualifiedType; + typedef typename type_traits_detail::PointerTraits::type PointeeType; + typedef typename type_traits_detail::ReferenceTraits::type ReferredType; + + enum { isConst = type_traits_detail::UnConst::value }; + enum { isVolatile = type_traits_detail::UnVolatile::value }; + + enum { isReference = type_traits_detail::ReferenceTraits::value }; + enum { isPointer = type_traits_detail::PointerTraits::type>::value }; + + enum { isUnsignedInt = type_traits_detail::IsUnsignedIntegral::value }; + enum { isSignedInt = type_traits_detail::IsSignedIntergral::value }; + enum { isIntegral = type_traits_detail::IsIntegral::value }; + enum { isFloat = type_traits_detail::IsFloat::value }; + enum { isArith = isIntegral || isFloat }; + enum { isVec = type_traits_detail::IsVec::value }; + + typedef typename 
type_traits_detail::Select::value, + T, typename type_traits_detail::AddParameterType::type>::type ParameterType; + }; +}}} + +#endif // __OPENCV_GPU_TYPE_TRAITS_HPP__ diff --git a/OpenCV/Headers/gpu/device/utility.hpp b/OpenCV/Headers/gpu/device/utility.hpp new file mode 100644 index 0000000000..4489a20b15 --- /dev/null +++ b/OpenCV/Headers/gpu/device/utility.hpp @@ -0,0 +1,237 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_UTILITY_HPP__ +#define __OPENCV_GPU_UTILITY_HPP__ + +#include "saturate_cast.hpp" +#include "datamov_utils.hpp" +#include "detail/reduction_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + #define OPENCV_GPU_LOG_WARP_SIZE (5) + #define OPENCV_GPU_WARP_SIZE (1 << OPENCV_GPU_LOG_WARP_SIZE) + #define OPENCV_GPU_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla + #define OPENCV_GPU_MEM_BANKS (1 << OPENCV_GPU_LOG_MEM_BANKS) + + /////////////////////////////////////////////////////////////////////////////// + // swap + + template void __device__ __host__ __forceinline__ swap(T& a, T& b) + { + const T temp = a; + a = b; + b = temp; + } + + /////////////////////////////////////////////////////////////////////////////// + // Mask Reader + + struct SingleMask + { + explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {} + __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x] != 0; + } + + PtrStepb mask; + }; + + struct SingleMaskChannels + { + __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_) + : mask(mask_), channels(channels_) {} + __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_) + :mask(mask_.mask), 
channels(mask_.channels){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x / channels] != 0; + } + + PtrStepb mask; + int channels; + }; + + struct MaskCollection + { + explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_) + : maskCollection(maskCollection_) {} + + __device__ __forceinline__ MaskCollection(const MaskCollection& masks_) + : maskCollection(masks_.maskCollection), curMask(masks_.curMask){} + + __device__ __forceinline__ void next() + { + curMask = *maskCollection++; + } + __device__ __forceinline__ void setMask(int z) + { + curMask = maskCollection[z]; + } + + __device__ __forceinline__ bool operator()(int y, int x) const + { + uchar val; + return curMask.data == 0 || (ForceGlob::Load(curMask.ptr(y), x, val), (val != 0)); + } + + const PtrStepb* maskCollection; + PtrStepb curMask; + }; + + struct WithOutMask + { + __device__ __forceinline__ WithOutMask(){} + __device__ __forceinline__ WithOutMask(const WithOutMask& mask){} + + __device__ __forceinline__ void next() const + { + } + __device__ __forceinline__ void setMask(int) const + { + } + + __device__ __forceinline__ bool operator()(int, int) const + { + return true; + } + + __device__ __forceinline__ bool operator()(int, int, int) const + { + return true; + } + + static __device__ __forceinline__ bool check(int, int) + { + return true; + } + + static __device__ __forceinline__ bool check(int, int, int, uint offset = 0) + { + return true; + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // Reduction + + template __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op) + { + StaticAssert= 8 && n <= 512>::check(); + utility_detail::ReductionDispatcher::reduce(data, partial_reduction, tid, op); + } + + template + __device__ __forceinline__ void reducePredVal(volatile T* sdata, T& myData, V* sval, V& myVal, int tid, const Pred& pred) + 
{ + StaticAssert= 8 && n <= 512>::check(); + utility_detail::PredValReductionDispatcher::reduce(myData, myVal, sdata, sval, tid, pred); + } + + template + __device__ __forceinline__ void reducePredVal2(volatile T* sdata, T& myData, V1* sval1, V1& myVal1, V2* sval2, V2& myVal2, int tid, const Pred& pred) + { + StaticAssert= 8 && n <= 512>::check(); + utility_detail::PredVal2ReductionDispatcher::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred); + } + + /////////////////////////////////////////////////////////////////////////////// + // Solve linear system + + // solve 2x2 linear system Ax=b + template __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2]) + { + T det = A[0][0] * A[1][1] - A[1][0] * A[0][1]; + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * (b[0] * A[1][1] - b[1] * A[0][1])); + + x[1] = saturate_cast(invdet * (A[0][0] * b[1] - A[1][0] * b[0])); + + return true; + } + + return false; + } + + // solve 3x3 linear system Ax=b + template __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3]) + { + T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) + - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]); + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * + (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) - + A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2] ) + + A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2] ))); + + x[1] = saturate_cast(invdet * + (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2] ) - + b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0]))); + + x[2] = saturate_cast(invdet * + (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) - + A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) + + b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]))); + + return true; + } + + return false; + } +}}} // namespace cv { namespace gpu { namespace device + +#endif 
// __OPENCV_GPU_UTILITY_HPP__ diff --git a/OpenCV/Headers/gpu/device/vec_distance.hpp b/OpenCV/Headers/gpu/device/vec_distance.hpp new file mode 100644 index 0000000000..b7861bca75 --- /dev/null +++ b/OpenCV/Headers/gpu/device/vec_distance.hpp @@ -0,0 +1,224 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VEC_DISTANCE_HPP__ +#define __OPENCV_GPU_VEC_DISTANCE_HPP__ + +#include "utility.hpp" +#include "functional.hpp" +#include "detail/vec_distance_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct L1Dist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ L1Dist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum = __sad(val1, val2, mySum); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + template <> struct L1Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L1Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + mySum += ::fabs(val1 - val2); + } + + template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return mySum; + } + + float mySum; + }; + + struct L2Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L2Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + float reg = val1 - val2; + mySum += reg * reg; + } + + 
template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return sqrtf(mySum); + } + + float mySum; + }; + + struct HammingDist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ HammingDist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum += __popc(val1 ^ val2); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + + // calc distance between two vectors in global memory + template + __device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + for (int i = tid; i < len; i += THREAD_DIM) + { + T1 val1; + ForceGlob::Load(vec1, i, val1); + + T2 val2; + ForceGlob::Load(vec2, i, val2); + + dist.reduceIter(val1, val2); + } + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory + template + __device__ __forceinline__ void calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + vec_distance_detail::VecDiffCachedCalculator::calc(vecCached, vecGlob, len, dist, tid); + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors in global memory + template struct VecDiffGlobal + { + explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0) + { + vec1 = vec1_; + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffGlobal(vec1, vec2, len, dist, smem, tid); + } + + const T1* vec1; + }; + + // calc 
distance between two vectors, first vector is cached in register memory, second vector is in global memory + template struct VecDiffCachedRegister + { + template __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid) + { + if (glob_tid < len) + smem[glob_tid] = vec1[glob_tid]; + __syncthreads(); + + U* vec1ValsPtr = vec1Vals; + + #pragma unroll + for (int i = tid; i < MAX_LEN; i += THREAD_DIM) + *vec1ValsPtr++ = smem[i]; + + __syncthreads(); + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffCached(vec1Vals, vec2, len, dist, smem, tid); + } + + U vec1Vals[MAX_LEN / THREAD_DIM]; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_DISTANCE_HPP__ diff --git a/OpenCV/Headers/gpu/device/vec_math.hpp b/OpenCV/Headers/gpu/device/vec_math.hpp new file mode 100644 index 0000000000..0ec790c0b7 --- /dev/null +++ b/OpenCV/Headers/gpu/device/vec_math.hpp @@ -0,0 +1,330 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_VECMATH_HPP__ +#define __OPENCV_GPU_VECMATH_HPP__ + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "functional.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace vec_math_detail + { + template struct SatCastHelper; + template struct SatCastHelper<1, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x)); + } + }; + template struct SatCastHelper<2, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y)); + } + }; + template struct SatCastHelper<3, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z)); + } + }; + template struct SatCastHelper<4, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z), saturate_cast(v.w)); + } + }; + + template static __device__ __forceinline__ VecD saturate_cast_caller(const VecS& v) + { + return SatCastHelper::cn, VecD>::cast(v); + } + } + + template static __device__ __forceinline__ _Tp saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ 
__forceinline__ _Tp saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + + template static __device__ __forceinline__ _Tp saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + + template static __device__ __forceinline__ _Tp saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const char3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const ushort3& v) {return 
vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + + template static __device__ __forceinline__ _Tp saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + template static __device__ __forceinline__ _Tp saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_caller<_Tp>(v);} + +#define OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, op, func) \ + __device__ __forceinline__ TypeVec::result_type, 1>::vec_type op(const type ## 1 & a) \ + { \ + func f; \ + return 
VecTraits::result_type, 1>::vec_type>::make(f(a.x)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 2>::vec_type op(const type ## 2 & a) \ + { \ + func f; \ + return VecTraits::result_type, 2>::vec_type>::make(f(a.x), f(a.y)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 3>::vec_type op(const type ## 3 & a) \ + { \ + func f; \ + return VecTraits::result_type, 3>::vec_type>::make(f(a.x), f(a.y), f(a.z)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 4>::vec_type op(const type ## 4 & a) \ + { \ + func f; \ + return VecTraits::result_type, 4>::vec_type>::make(f(a.x), f(a.y), f(a.z), f(a.w)); \ + } + + namespace vec_math_detail + { + template struct BinOpTraits + { + typedef int argument_type; + }; + template struct BinOpTraits + { + typedef T argument_type; + }; + template struct BinOpTraits + { + typedef double argument_type; + }; + template struct BinOpTraits + { + typedef double argument_type; + }; + template <> struct BinOpTraits + { + typedef double argument_type; + }; + template struct BinOpTraits + { + typedef float argument_type; + }; + template struct BinOpTraits + { + typedef float argument_type; + }; + template <> struct BinOpTraits + { + typedef float argument_type; + }; + template <> struct BinOpTraits + { + typedef double argument_type; + }; + template <> struct BinOpTraits + { + typedef double argument_type; + }; + } + +#define OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, op, func) \ + __device__ __forceinline__ TypeVec::result_type, 1>::vec_type op(const type ## 1 & a, const type ## 1 & b) \ + { \ + func f; \ + return VecTraits::result_type, 1>::vec_type>::make(f(a.x, b.x)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 1>::vec_type op(const type ## 1 & v, T s) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 1>::vec_type>::make(f(v.x, s)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 
1>::vec_type op(T s, const type ## 1 & v) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 1>::vec_type>::make(f(s, v.x)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 2>::vec_type op(const type ## 2 & a, const type ## 2 & b) \ + { \ + func f; \ + return VecTraits::result_type, 2>::vec_type>::make(f(a.x, b.x), f(a.y, b.y)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 2>::vec_type op(const type ## 2 & v, T s) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 2>::vec_type>::make(f(v.x, s), f(v.y, s)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 2>::vec_type op(T s, const type ## 2 & v) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 2>::vec_type>::make(f(s, v.x), f(s, v.y)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 3>::vec_type op(const type ## 3 & a, const type ## 3 & b) \ + { \ + func f; \ + return VecTraits::result_type, 3>::vec_type>::make(f(a.x, b.x), f(a.y, b.y), f(a.z, b.z)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 3>::vec_type op(const type ## 3 & v, T s) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 3>::vec_type>::make(f(v.x, s), f(v.y, s), f(v.z, s)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 3>::vec_type op(T s, const type ## 3 & v) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 3>::vec_type>::make(f(s, v.x), f(s, v.y), f(s, v.z)); \ + } \ + __device__ __forceinline__ TypeVec::result_type, 4>::vec_type op(const type ## 4 & a, const type ## 4 & b) \ + { \ + func f; \ + return VecTraits::result_type, 4>::vec_type>::make(f(a.x, b.x), f(a.y, b.y), f(a.z, b.z), f(a.w, b.w)); \ + } \ + template \ + __device__ __forceinline__ typename 
TypeVec::argument_type>::result_type, 4>::vec_type op(const type ## 4 & v, T s) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 4>::vec_type>::make(f(v.x, s), f(v.y, s), f(v.z, s), f(v.w, s)); \ + } \ + template \ + __device__ __forceinline__ typename TypeVec::argument_type>::result_type, 4>::vec_type op(T s, const type ## 4 & v) \ + { \ + func::argument_type> f; \ + return VecTraits::argument_type>::result_type, 4>::vec_type>::make(f(s, v.x), f(s, v.y), f(s, v.z), f(s, v.w)); \ + } + +#define OPENCV_GPU_IMPLEMENT_VEC_OP(type) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator +, plus) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator -, minus) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator *, multiplies) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator /, divides) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP (type, operator -, negate) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator ==, equal_to) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator !=, not_equal_to) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator > , greater) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator < , less) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator >=, greater_equal) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator <=, less_equal) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator &&, logical_and) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator ||, logical_or) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP (type, operator ! 
, logical_not) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, max, maximum) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, min, minimum) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, fabs, fabs_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, sqrt, sqrt_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, exp, exp_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, exp2, exp2_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, exp10, exp10_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, log, log_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, log2, log2_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, log10, log10_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, sin, sin_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, cos, cos_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, tan, tan_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, asin, asin_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, acos, acos_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, atan, atan_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, sinh, sinh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, cosh, cosh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, tanh, tanh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, asinh, asinh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, acosh, acosh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP(type, atanh, atanh_func) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, hypot, hypot_func) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, atan2, atan2_func) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, pow, pow_func) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, hypot_sqr, hypot_sqr_func) + +#define OPENCV_GPU_IMPLEMENT_VEC_INT_OP(type) \ + OPENCV_GPU_IMPLEMENT_VEC_OP(type) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator &, bit_and) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator |, bit_or) \ + OPENCV_GPU_IMPLEMENT_VEC_BINOP(type, operator ^, bit_xor) \ + OPENCV_GPU_IMPLEMENT_VEC_UNOP (type, operator ~, bit_not) + + OPENCV_GPU_IMPLEMENT_VEC_INT_OP(uchar) + OPENCV_GPU_IMPLEMENT_VEC_INT_OP(char) + OPENCV_GPU_IMPLEMENT_VEC_INT_OP(ushort) + 
OPENCV_GPU_IMPLEMENT_VEC_INT_OP(short) + OPENCV_GPU_IMPLEMENT_VEC_INT_OP(int) + OPENCV_GPU_IMPLEMENT_VEC_INT_OP(uint) + OPENCV_GPU_IMPLEMENT_VEC_OP(float) + OPENCV_GPU_IMPLEMENT_VEC_OP(double) + + #undef OPENCV_GPU_IMPLEMENT_VEC_UNOP + #undef OPENCV_GPU_IMPLEMENT_VEC_BINOP + #undef OPENCV_GPU_IMPLEMENT_VEC_OP + #undef OPENCV_GPU_IMPLEMENT_VEC_INT_OP +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VECMATH_HPP__ \ No newline at end of file diff --git a/OpenCV/Headers/gpu/device/vec_traits.hpp b/OpenCV/Headers/gpu/device/vec_traits.hpp new file mode 100644 index 0000000000..8d179c83f5 --- /dev/null +++ b/OpenCV/Headers/gpu/device/vec_traits.hpp @@ -0,0 +1,280 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VEC_TRAITS_HPP__ +#define __OPENCV_GPU_VEC_TRAITS_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct TypeVec; + + struct __align__(8) uchar8 + { + uchar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7) + { + uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(8) char8 + { + schar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) + { + char8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) ushort8 + { + ushort a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7) + { + ushort8 val 
= {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) short8 + { + short a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7) + { + short8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) uint8 + { + uint a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7) + { + uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) int8 + { + int a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7) + { + int8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) float8 + { + float a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7) + { + float8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct double8 + { + double a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7) + { + double8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + +#define OPENCV_GPU_IMPLEMENT_TYPE_VEC(type) \ + template<> struct TypeVec { typedef type vec_type; }; \ + template<> struct TypeVec { typedef type ## 1 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { 
typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; + + OPENCV_GPU_IMPLEMENT_TYPE_VEC(uchar) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(char) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(ushort) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(short) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(int) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(uint) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(float) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(double) + + #undef OPENCV_GPU_IMPLEMENT_TYPE_VEC + + template<> struct TypeVec { typedef schar vec_type; }; + template<> struct TypeVec { typedef char2 vec_type; }; + template<> struct TypeVec { typedef char3 vec_type; }; + template<> struct TypeVec { typedef char4 vec_type; }; + template<> struct TypeVec { typedef char8 vec_type; }; + + template<> struct TypeVec { typedef uchar vec_type; }; + template<> struct TypeVec { typedef uchar2 vec_type; }; + template<> struct TypeVec { typedef uchar3 vec_type; }; + template<> struct TypeVec { typedef uchar4 vec_type; }; + template<> struct TypeVec { typedef uchar8 vec_type; }; + + template struct VecTraits; + +#define OPENCV_GPU_IMPLEMENT_VEC_TRAITS(type) \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type all(type v) {return v;} \ + static __device__ __host__ __forceinline__ type make(type x) {return x;} \ + static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \ + static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \ + static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=2}; \ + static 
__device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \ + static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \ + static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=3}; \ + static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \ + static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=4}; \ + static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \ + static __device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type ## 4(v[0], v[1], v[2], v[3]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=8}; \ + static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \ + static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \ + }; + + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uchar) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(ushort) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(short) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(int) + 
OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uint) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(float) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(double) + + #undef OPENCV_GPU_IMPLEMENT_VEC_TRAITS + + template<> struct VecTraits + { + typedef char elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char all(char v) {return v;} + static __device__ __host__ __forceinline__ char make(char x) {return x;} + static __device__ __host__ __forceinline__ char make(const char* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ schar all(schar v) {return v;} + static __device__ __host__ __forceinline__ schar make(schar x) {return x;} + static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);} + static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);} + static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=2}; + static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);} + static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);} + static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=3}; + static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);} + static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);} + static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum 
{cn=4}; + static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);} + static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);} + static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=8}; + static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);} + static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);} + static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_TRAITS_HPP__ diff --git a/OpenCV/Headers/gpu/device/warp.hpp b/OpenCV/Headers/gpu/device/warp.hpp new file mode 100644 index 0000000000..d4b0b8d8f7 --- /dev/null +++ b/OpenCV/Headers/gpu/device/warp.hpp @@ -0,0 +1,112 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_DEVICE_WARP_HPP__ +#define __OPENCV_GPU_DEVICE_WARP_HPP__ + +namespace cv { namespace gpu { namespace device +{ + struct Warp + { + enum + { + LOG_WARP_SIZE = 5, + WARP_SIZE = 1 << LOG_WARP_SIZE, + STRIDE = WARP_SIZE + }; + + /** \brief Returns the warp lane ID of the calling thread. 
*/ + static __device__ __forceinline__ unsigned int laneId() + { + unsigned int ret; + asm("mov.u32 %0, %laneid;" : "=r"(ret) ); + return ret; + } + + template + static __device__ __forceinline__ void fill(It beg, It end, const T& value) + { + for(It t = beg + laneId(); t < end; t += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = *t; + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = op(*t); + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op) + { + unsigned int lane = laneId(); + + InIt1 t1 = beg1 + lane; + InIt2 t2 = beg2 + lane; + for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE) + *out = op(*t1, *t2); + return out; + } + + template + static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value) + { + unsigned int lane = laneId(); + value += lane; + + for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE) + *t = value; + } + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */ \ No newline at end of file diff --git a/OpenCV/Headers/gpu/device/warp_reduce.hpp b/OpenCV/Headers/gpu/device/warp_reduce.hpp new file mode 100644 index 0000000000..7ac85b095d --- /dev/null +++ b/OpenCV/Headers/gpu/device/warp_reduce.hpp @@ -0,0 +1,69 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or bpied warranties, including, but not limited to, the bpied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef OPENCV_GPU_WARP_REDUCE_HPP__ +#define OPENCV_GPU_WARP_REDUCE_HPP__ + +namespace cv { namespace gpu { namespace device +{ + template + __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x) + { + const unsigned int lane = tid & 31; // index of thread in warp (0..31) + + if (lane < 16) + { + T partial = ptr[tid]; + + ptr[tid] = partial = partial + ptr[tid + 16]; + ptr[tid] = partial = partial + ptr[tid + 8]; + ptr[tid] = partial = partial + ptr[tid + 4]; + ptr[tid] = partial = partial + ptr[tid + 2]; + ptr[tid] = partial = partial + ptr[tid + 1]; + } + + return ptr[tid - lane]; + } +}}} // namespace cv { namespace gpu { namespace device { + +#endif /* OPENCV_GPU_WARP_REDUCE_HPP__ */ \ No newline at end of file diff --git a/OpenCV/Headers/gpu/devmem2d.hpp b/OpenCV/Headers/gpu/devmem2d.hpp new file mode 100644 index 0000000000..fc2d2f2757 --- /dev/null +++ b/OpenCV/Headers/gpu/devmem2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/cuda_devptrs.hpp" diff --git a/OpenCV/Headers/gpu/gpu.hpp b/OpenCV/Headers/gpu/gpu.hpp new file mode 100644 index 0000000000..60cff99f6c --- /dev/null +++ b/OpenCV/Headers/gpu/gpu.hpp @@ -0,0 +1,2532 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_GPU_HPP__ +#define __OPENCV_GPU_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#include +#endif + +#include "opencv2/core/gpumat.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/objdetect/objdetect.hpp" +#include "opencv2/features2d/features2d.hpp" + +namespace cv { namespace gpu { + +//////////////////////////////// CudaMem //////////////////////////////// +// CudaMem is limited cv::Mat with page locked memory allocation. +// Page locked memory is only needed for async and faster coping to GPU. +// It is convertable to cv::Mat header without reference counting +// so you can use it with other opencv functions. + +// Page-locks the matrix m memory and maps it for the device(s) +CV_EXPORTS void registerPageLocked(Mat& m); +// Unmaps the memory of matrix m, and makes it pageable again. +CV_EXPORTS void unregisterPageLocked(Mat& m); + +class CV_EXPORTS CudaMem +{ +public: + enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 }; + + CudaMem(); + CudaMem(const CudaMem& m); + + CudaMem(int rows, int cols, int type, int _alloc_type = ALLOC_PAGE_LOCKED); + CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED); + + + //! creates from cv::Mat with coping data + explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED); + + ~CudaMem(); + + CudaMem& operator = (const CudaMem& m); + + //! returns deep copy of the matrix, i.e. the data is copied + CudaMem clone() const; + + //! allocates new matrix data unless the matrix already has specified size and type. + void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED); + void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED); + + //! decrements reference counter and released memory if needed. + void release(); + + //! returns matrix header with disabled reference counting for CudaMem data. + Mat createMatHeader() const; + operator Mat() const; + + //! 
maps host memory into device address space and returns GpuMat header for it. Throws exception if not supported by hardware. + GpuMat createGpuMatHeader() const; + operator GpuMat() const; + + //returns if host memory can be mapperd to gpu address space; + static bool canMapHostMemory(); + + // Please see cv::Mat for descriptions + bool isContinuous() const; + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1() const; + Size size() const; + bool empty() const; + + + // Please see cv::Mat for descriptions + int flags; + int rows, cols; + size_t step; + + uchar* data; + int* refcount; + + uchar* datastart; + uchar* dataend; + + int alloc_type; +}; + +//////////////////////////////// CudaStream //////////////////////////////// +// Encapculates Cuda Stream. Provides interface for async coping. +// Passed to each function that supports async kernel execution. +// Reference counting is enabled + +class CV_EXPORTS Stream +{ +public: + Stream(); + ~Stream(); + + Stream(const Stream&); + Stream& operator=(const Stream&); + + bool queryIfComplete(); + void waitForCompletion(); + + //! downloads asynchronously. + // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat) + void enqueueDownload(const GpuMat& src, CudaMem& dst); + void enqueueDownload(const GpuMat& src, Mat& dst); + + //! uploads asynchronously. + // Warning! cv::Mat must point to page locked memory (i.e. 
to CudaMem data or to its ROI) + void enqueueUpload(const CudaMem& src, GpuMat& dst); + void enqueueUpload(const Mat& src, GpuMat& dst); + + void enqueueCopy(const GpuMat& src, GpuMat& dst); + + void enqueueMemSet(GpuMat& src, Scalar val); + void enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask); + + // converts matrix type, ex from float to uchar depending on type + void enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a = 1, double b = 0); + + static Stream& Null(); + + operator bool() const; + +private: + void create(); + void release(); + + struct Impl; + Impl *impl; + + friend struct StreamAccessor; + + explicit Stream(Impl* impl); +}; + + +//////////////////////////////// Filter Engine //////////////////////////////// + +/*! +The Base Class for 1D or Row-wise Filters + +This is the base class for linear or non-linear filters that process 1D data. +In particular, such filters are used for the "horizontal" filtering parts in separable filters. +*/ +class CV_EXPORTS BaseRowFilter_GPU +{ +public: + BaseRowFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseRowFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + int ksize, anchor; +}; + +/*! +The Base Class for Column-wise Filters + +This is the base class for linear or non-linear filters that process columns of 2D arrays. +Such filters are used for the "vertical" filtering parts in separable filters. +*/ +class CV_EXPORTS BaseColumnFilter_GPU +{ +public: + BaseColumnFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseColumnFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + int ksize, anchor; +}; + +/*! +The Base Class for Non-Separable 2D Filters. + +This is the base class for linear or non-linear 2D filters. 
+*/ +class CV_EXPORTS BaseFilter_GPU +{ +public: + BaseFilter_GPU(const Size& ksize_, const Point& anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + Size ksize; + Point anchor; +}; + +/*! +The Base Class for Filter Engine. + +The class can be used to apply an arbitrary filtering operation to an image. +It contains all the necessary intermediate buffers. +*/ +class CV_EXPORTS FilterEngine_GPU +{ +public: + virtual ~FilterEngine_GPU() {} + + virtual void apply(const GpuMat& src, GpuMat& dst, Rect roi = Rect(0,0,-1,-1), Stream& stream = Stream::Null()) = 0; +}; + +//! returns the non-separable filter engine with the specified filter +CV_EXPORTS Ptr createFilter2D_GPU(const Ptr& filter2D, int srcType, int dstType); + +//! returns the separable filter engine with the specified filters +CV_EXPORTS Ptr createSeparableFilter_GPU(const Ptr& rowFilter, + const Ptr& columnFilter, int srcType, int bufType, int dstType); +CV_EXPORTS Ptr createSeparableFilter_GPU(const Ptr& rowFilter, + const Ptr& columnFilter, int srcType, int bufType, int dstType, GpuMat& buf); + +//! returns horizontal 1D box filter +//! supports only CV_8UC1 source type and CV_32FC1 sum type +CV_EXPORTS Ptr getRowSumFilter_GPU(int srcType, int sumType, int ksize, int anchor = -1); + +//! returns vertical 1D box filter +//! supports only CV_8UC1 sum type and CV_32FC1 dst type +CV_EXPORTS Ptr getColumnSumFilter_GPU(int sumType, int dstType, int ksize, int anchor = -1); + +//! returns 2D box filter +//! supports CV_8UC1 and CV_8UC4 source type, dst type must be the same as source type +CV_EXPORTS Ptr getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1)); + +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter_GPU(int srcType, int dstType, const Size& ksize, + const Point& anchor = Point(-1,-1)); + +//! returns 2D morphological filter +//! 
only MORPH_ERODE and MORPH_DILATE are supported +//! supports CV_8UC1 and CV_8UC4 types +//! kernel must have CV_8UC1 type, one rows and cols == ksize.width * ksize.height +CV_EXPORTS Ptr getMorphologyFilter_GPU(int op, int type, const Mat& kernel, const Size& ksize, + Point anchor=Point(-1,-1)); + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter_GPU(int op, int type, const Mat& kernel, + const Point& anchor = Point(-1,-1), int iterations = 1); +CV_EXPORTS Ptr createMorphologyFilter_GPU(int op, int type, const Mat& kernel, GpuMat& buf, + const Point& anchor = Point(-1,-1), int iterations = 1); + +//! returns 2D filter with the specified kernel +//! supports CV_8U, CV_16U and CV_32F one and four channel image +CV_EXPORTS Ptr getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, + Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT); + +//! returns the primitive row filter with the specified kernel. +//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 source type. +//! there are two version of algorithm: NPP and OpenCV. +//! NPP calls when srcType == CV_8UC1 or srcType == CV_8UC4 and bufType == srcType, +//! otherwise calls OpenCV version. +//! NPP supports only BORDER_CONSTANT border type. +//! OpenCV version supports only CV_32F as buffer depth and +//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types. +CV_EXPORTS Ptr getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel, + int anchor = -1, int borderType = BORDER_DEFAULT); + +//! returns the primitive column filter with the specified kernel. +//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 dst type. +//! 
there are two version of algorithm: NPP and OpenCV. +//! NPP calls when dstType == CV_8UC1 or dstType == CV_8UC4 and bufType == dstType, +//! otherwise calls OpenCV version. +//! NPP supports only BORDER_CONSTANT border type. +//! OpenCV version supports only CV_32F as buffer depth and +//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types. +CV_EXPORTS Ptr getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel, + int anchor = -1, int borderType = BORDER_DEFAULT); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel, + const Mat& columnKernel, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1); +CV_EXPORTS Ptr createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel, + const Mat& columnKernel, GpuMat& buf, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1); + +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS Ptr createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize, GpuMat& buf, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS Ptr createGaussianFilter_GPU(int type, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); + +//! returns maximum filter +CV_EXPORTS Ptr getMaxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1)); + +//! 
returns minimum filter +CV_EXPORTS Ptr getMinFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1)); + +//! smooths the image using the normalized box filter +//! supports CV_8UC1, CV_8UC4 types +CV_EXPORTS void boxFilter(const GpuMat& src, GpuMat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null()); + +//! a synonym for normalized box filter +static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null()) +{ + boxFilter(src, dst, -1, ksize, anchor, stream); +} + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, + Point anchor = Point(-1, -1), int iterations = 1, + Stream& stream = Stream::Null()); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, + Point anchor = Point(-1, -1), int iterations = 1, + Stream& stream = Stream::Null()); + +//! applies an advanced morphological operation to the image +CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, GpuMat& buf1, GpuMat& buf2, + Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null()); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS void filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernel, Point anchor=Point(-1,-1), int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null()); + +//! 
applies separable 2D linear filter to the image +CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, + Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, GpuMat& buf, + Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, + Stream& stream = Stream::Null()); + +//! applies generalized Sobel operator to the image +CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, int ksize = 3, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! smooths the image using Gaussian filter. +CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! applies Laplacian operator to the image +//! 
supports only ksize = 1 and ksize = 3 +CV_EXPORTS void Laplacian(const GpuMat& src, GpuMat& dst, int ddepth, int ksize = 1, double scale = 1, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null()); + + +////////////////////////////// Arithmetics /////////////////////////////////// + +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS void gemm(const GpuMat& src1, const GpuMat& src2, double alpha, + const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null()); + +//! transposes the matrix +//! supports matrix with element size = 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc) +CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst, Stream& stream = Stream::Null()); + +//! reverses the order of the rows, columns or both in a matrix +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or CV_32F depth +CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode, Stream& stream = Stream::Null()); + +//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i)) +//! destination array will have the depth type as lut and the same channels number as source +//! supports CV_8UC1, CV_8UC3 types +CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null()); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null()); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const vector& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null()); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const GpuMat& src, vector& dst, Stream& stream = Stream::Null()); + +//! 
computes magnitude of complex (x(i).re, x(i).im) vector +//! supports only CV_32FC2 type +CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes squared magnitude of complex (x(i).re, x(i).im) vector +//! supports only CV_32FC2 type +CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes magnitude of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes squared magnitude of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes angle (angle(i)) of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null()); + +//! converts Cartesian coordinates to polar +//! supports only floating-point source +CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null()); + +//! converts polar coordinates to Cartesian +//! supports only floating-point source +CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null()); + + +//////////////////////////// Per-element operations //////////////////////////////////// + +//! adds one matrix to another (c = a + b) +CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); +//! 
adds scalar to a matrix (c = a + s) +CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); + +//! subtracts one matrix from another (c = a - b) +CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); +//! subtracts scalar from a matrix (c = a - s) +CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); + +//! computes element-wise weighted product of the two arrays (c = scale * a * b) +CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! weighted multiplies matrix to a scalar (c = scale * a * s) +CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); + +//! computes element-wise weighted quotient of the two arrays (c = a / b) +CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! computes element-wise weighted quotient of matrix and scalar (c = a / s) +CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null()); + +//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, + int dtype = -1, Stream& stream = Stream::Null()); + +//! 
adds scaled array to another one (dst = alpha*src1 + src2) +static inline void scaleAdd(const GpuMat& src1, double alpha, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()) +{ + addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream); +} + +//! computes element-wise absolute difference of two arrays (c = abs(a - b)) +CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null()); +//! computes element-wise absolute difference of array and scalar (c = abs(a - s)) +CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null()); + +//! computes absolute value of each matrix element +//! supports CV_16S and CV_32F depth +CV_EXPORTS void abs(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes square of each pixel in an image +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void sqr(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes square root of each pixel in an image +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void sqrt(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes exponent of each matrix element (b = e**a) +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void exp(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null()); + +//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a)) +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null()); + +//! computes power of each matrix element: +// (dst(i,j) = pow( src(i,j) , power), if src.type() is integer +// (dst(i,j) = pow(fabs(src(i,j)), power), otherwise +//! supports all, except depth == CV_64F +CV_EXPORTS void pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream = Stream::Null()); + +//! 
compares elements of two arrays (c = a b) +CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null()); + +//! performs per-elements bit-wise inversion +CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise disjunction of two arrays +CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise disjunction of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_or(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise conjunction of two arrays +CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise conjunction of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_and(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise "exclusive or" operation +CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise "exclusive or" of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_xor(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! pixel by pixel right shift of an image by a constant value +//! supports 1, 3 and 4 channels images with integers elements +CV_EXPORTS void rshift(const GpuMat& src, Scalar_ sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! 
pixel by pixel left shift of an image by a constant value +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void lshift(const GpuMat& src, Scalar_ sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null()); + +enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL, + ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL}; + +//! Composite two images using alpha opacity values contained in each image +//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types +CV_EXPORTS void alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null()); + + +////////////////////////////// Image processing ////////////////////////////// + +//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]] +//! supports only CV_32FC1 map type +CV_EXPORTS void remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, + int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), + Stream& stream = Stream::Null()); + +//! Does mean shift filtering on GPU. 
+CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), + Stream& stream = Stream::Null()); + +//! Does mean shift procedure on GPU. +CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), + Stream& stream = Stream::Null()); + +//! Does mean shift segmentation with elimination of small regions. +CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1)); + +//! Does coloring of disparity image: [0..ndisp) -> [0..240, 1, 1] in HSV. +//! Supported types of input disparity: CV_8U, CV_16S. +//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255). +CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, Stream& stream = Stream::Null()); + +//! Reprojects disparity image to 3D space. +//! Supports CV_8U and CV_16S types of input disparity. +//! The output is a 3- or 4-channel floating-point matrix. +//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map. +//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify. +CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, int dst_cn = 4, Stream& stream = Stream::Null()); + +//! converts image from one color space to another +CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null()); + +//! swap channels +//! dstOrder - Integer array describing how channel values are permutated. The n-th entry +//! of the array contains the number of the channel that is stored in the n-th channel of +//! the output image. E.g. 
Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR +//! channel order. +CV_EXPORTS void swapChannels(GpuMat& image, const int dstOrder[4], Stream& stream = Stream::Null()); + +//! Routines for correcting image color gamma +CV_EXPORTS void gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward = true, Stream& stream = Stream::Null()); + +//! applies fixed threshold to the image +CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null()); + +//! resizes the image +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA +CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null()); + +//! warps the image using affine transformation +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null()); + +CV_EXPORTS void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null()); + +//! warps the image using perspective transformation +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null()); + +CV_EXPORTS void buildWarpPerspectiveMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null()); + +//! builds plane warping maps +CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, const Mat &T, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! 
builds cylindrical warping maps +CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! builds spherical warping maps +CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! rotates an image around the origin (0,0) and then shifts it +//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +//! supports 1, 3 or 4 channels images with CV_8U, CV_16U or CV_32F depth +CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, + int interpolation = INTER_LINEAR, Stream& stream = Stream::Null()); + +//! copies 2D array to a larger destination array and pads borders with user-specifiable constant +CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, + const Scalar& value = Scalar(), Stream& stream = Stream::Null()); + +//! computes the integral image +//! sum will have CV_32S type, but will contain unsigned int values +//! supports only CV_8UC1 source type +CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null()); +//! buffered version +CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& stream = Stream::Null()); + +//! computes squared integral image +//! result matrix will have 64F type, but will contain 64U values +//! supports source images of 8UC1 type only +CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null()); + +//! computes vertical sum, supports only CV_32FC1 images +CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum); + +//! computes the standard deviation of integral images +//! supports only CV_32SC1 source type and CV_32FC1 sqr type +//! 
output will have CV_32FC1 type +CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null()); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101); +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101); +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k, + int borderType = BORDER_REFLECT101, Stream& stream = Stream::Null()); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101); +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101); +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, + int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null()); + +//! performs per-element multiplication of two full (not packed) Fourier spectrums +//! supports 32FC2 matrixes only (interleaved format) +CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream = Stream::Null()); + +//! performs per-element multiplication of two full (not packed) Fourier spectrums +//! supports 32FC2 matrixes only (interleaved format) +CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null()); + +//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix. 
+//! Param dft_size is the size of DFT transform. +//! +//! If the source matrix is not continous, then additional copy will be done, +//! so to avoid copying ensure the source matrix is continous one. If you want to use +//! preallocated output ensure it is continuous too, otherwise it will be reallocated. +//! +//! Being implemented via CUFFT real-to-complex transform result contains only non-redundant values +//! in CUFFT's format. Result as full complex matrix for such kind of transform cannot be retrieved. +//! +//! For complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format. +CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream = Stream::Null()); + +struct CV_EXPORTS ConvolveBuf +{ + Size result_size; + Size block_size; + Size user_block_size; + Size dft_size; + int spect_len; + + GpuMat image_spect, templ_spect, result_spect; + GpuMat image_block, templ_block, result_data; + + void create(Size image_size, Size templ_size); + static Size estimateBlockSize(Size result_size, Size templ_size); +}; + + +//! computes convolution (or cross-correlation) of two images using discrete Fourier transform +//! supports source images of 32FC1 type only +//! result matrix will have 32FC1 type +CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr = false); +CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream = Stream::Null()); + +struct CV_EXPORTS MatchTemplateBuf +{ + Size user_block_size; + GpuMat imagef, templf; + std::vector images; + std::vector image_sums; + std::vector image_sqsums; +}; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null()); + +//! 
computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null()); + +//! smoothes the source image and downsamples it +CV_EXPORTS void pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! upsamples the source image and then smoothes it +CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! performs linear blending of two images +//! to avoid accuracy errors sum of weigths shouldn't be very close to zero +CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2, + GpuMat& result, Stream& stream = Stream::Null()); + +//! Performa bilateral filtering of passsed image +CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, + int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null()); + +//! Brute force non-local means algorith (slow but universal) +CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null()); + +//! Fast (but approximate)version of non-local means algorith similar to CPU function (running sums technique) +class CV_EXPORTS FastNonLocalMeansDenoising +{ +public: + //! Simple method, recommended for grayscale images (though it supports multichannel images) + void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null()); + + //! 
Processes luminance and color components separatelly + void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null()); + +private: + + GpuMat buffer, extended_src_buffer; + GpuMat lab, l, ab; +}; + + +struct CV_EXPORTS CannyBuf; + +CV_EXPORTS void Canny(const GpuMat& image, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false); + +struct CV_EXPORTS CannyBuf +{ + CannyBuf() {} + explicit CannyBuf(const Size& image_size, int apperture_size = 3) {create(image_size, apperture_size);} + CannyBuf(const GpuMat& dx_, const GpuMat& dy_); + + void create(const Size& image_size, int apperture_size = 3); + + void release(); + + GpuMat dx, dy; + GpuMat dx_buf, dy_buf; + GpuMat edgeBuf; + GpuMat trackBuf1, trackBuf2; + Ptr filterDX, filterDY; +}; + +class CV_EXPORTS ImagePyramid +{ +public: + inline ImagePyramid() : nLayers_(0) {} + inline ImagePyramid(const GpuMat& img, int nLayers, Stream& stream = Stream::Null()) + { + build(img, nLayers, stream); + } + + void build(const GpuMat& img, int nLayers, Stream& stream = Stream::Null()); + + void getLayer(GpuMat& outImg, Size outRoi, Stream& stream = Stream::Null()) const; + + inline void release() + { + layer0_.release(); + pyramid_.clear(); + nLayers_ = 0; + } + +private: + GpuMat layer0_; + std::vector pyramid_; + int nLayers_; +}; + +//! 
HoughLines + +struct HoughLinesBuf +{ + GpuMat accum; + GpuMat list; +}; + +CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096); +CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096); +CV_EXPORTS void HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray()); + +//! HoughCircles + +struct HoughCirclesBuf +{ + GpuMat edges; + GpuMat accum; + GpuMat list; + CannyBuf cannyBuf; +}; + +CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096); +CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096); +CV_EXPORTS void HoughCirclesDownload(const GpuMat& d_circles, OutputArray h_circles); + +//! finds arbitrary template in the grayscale image using Generalized Hough Transform +//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. +//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038. +class CV_EXPORTS GeneralizedHough_GPU : public Algorithm +{ +public: + static Ptr create(int method); + + virtual ~GeneralizedHough_GPU(); + + //! set template to search + void setTemplate(const GpuMat& templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1)); + void setTemplate(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter = Point(-1, -1)); + + //! 
find template on image + void detect(const GpuMat& image, GpuMat& positions, int cannyThreshold = 100); + void detect(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions); + + void download(const GpuMat& d_positions, OutputArray h_positions, OutputArray h_votes = noArray()); + + void release(); + +protected: + virtual void setTemplateImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter) = 0; + virtual void detectImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions) = 0; + virtual void releaseImpl() = 0; + +private: + GpuMat edges_; + CannyBuf cannyBuf_; +}; + +////////////////////////////// Matrix reductions ////////////////////////////// + +//! computes mean value and standard deviation of all or selected array elements +//! supports only CV_8UC1 type +CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev); +//! buffered version +CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf); + +//! computes norm of array +//! supports NORM_INF, NORM_L1, NORM_L2 +//! supports all matrices except 64F +CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2); + +//! computes norm of array +//! supports NORM_INF, NORM_L1, NORM_L2 +//! supports all matrices except 64F +CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf); + +//! computes norm of the difference between two arrays +//! supports NORM_INF, NORM_L1, NORM_L2 +//! supports only CV_8UC1 type +CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2); + +//! computes sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sum(const GpuMat& src); + +//! computes sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf); + +//! computes sum of array elements absolute values +//! 
supports only single channel images +CV_EXPORTS Scalar absSum(const GpuMat& src); + +//! computes sum of array elements absolute values +//! supports only single channel images +CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf); + +//! computes squared sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sqrSum(const GpuMat& src); + +//! computes squared sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf); + +//! finds global minimum and maximum array elements and returns their values +CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal=0, const GpuMat& mask=GpuMat()); + +//! finds global minimum and maximum array elements and returns their values +CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf); + +//! finds global minimum and maximum array elements and returns their values with locations +CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, + const GpuMat& mask=GpuMat()); + +//! finds global minimum and maximum array elements and returns their values with locations +CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc, + const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf); + +//! counts non-zero array elements +CV_EXPORTS int countNonZero(const GpuMat& src); + +//! counts non-zero array elements +CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf); + +//! 
reduces a matrix to a vector +CV_EXPORTS void reduce(const GpuMat& mtx, GpuMat& vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null()); + + +///////////////////////////// Calibration 3D ////////////////////////////////// + +CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + GpuMat& dst, Stream& stream = Stream::Null()); + +CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, + const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, + Stream& stream = Stream::Null()); + +CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, + const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, + int num_iters=100, float max_dist=8.0, int min_inlier_count=100, + std::vector* inliers=NULL); + +//////////////////////////////// Image Labeling //////////////////////////////// + +//!performs labeling via graph cuts of a 2D regular 4-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//!performs labeling via graph cuts of a 2D regular 8-connected graph. +CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight, + GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, + GpuMat& labels, + GpuMat& buf, Stream& stream = Stream::Null()); + +//! compute mask for Generalized Flood fill componetns labeling. +CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null()); + +//! performs connected componnents labeling. +CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null()); + +////////////////////////////////// Histograms ////////////////////////////////// + +//! 
Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type. +CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel); +//! Calculates histogram with evenly distributed bins for signle channel source. +//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types. +//! Output hist will have one row and histSize cols and CV_32SC1 type. +CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null()); +CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null()); +//! Calculates histogram with evenly distributed bins for four-channel source. +//! All channels of source are processed separately. +//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types. +//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type. +CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null()); +CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null()); +//! Calculates histogram with bins determined by levels array. +//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise. +//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types. +//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type. +CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null()); +CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null()); +//! Calculates histogram with bins determined by levels array. +//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise. 
+//! All channels of source are processed separately. +//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types. +//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type. +CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null()); +CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null()); + +//! Calculates histogram for 8u one channel image +//! Output hist will have one row, 256 cols and CV32SC1 type. +CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null()); +CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null()); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null()); +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null()); + +//////////////////////////////// StereoBM_GPU //////////////////////////////// + +class CV_EXPORTS StereoBM_GPU +{ +public: + enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 }; + + enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 }; + + //! the default constructor + StereoBM_GPU(); + //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be multiple of 8. + StereoBM_GPU(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair + //! Output disparity has CV_8U type. + void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + //! 
Some heuristics that tries to estmate + // if current GPU will be faster than CPU in this algorithm. + // It queries current active device. + static bool checkIfGpuCallReasonable(); + + int preset; + int ndisp; + int winSize; + + // If avergeTexThreshold == 0 => post procesing is disabled + // If avergeTexThreshold != 0 then disparity is set 0 in each point (x,y) where for left image + // SumOfHorizontalGradiensInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold + // i.e. input left image is low textured. + float avergeTexThreshold; + +private: + GpuMat minSSD, leBuf, riBuf; +}; + +////////////////////////// StereoBeliefPropagation /////////////////////////// +// "Efficient Belief Propagation for Early Vision" +// P.Felzenszwalb + +class CV_EXPORTS StereoBeliefPropagation +{ +public: + enum { DEFAULT_NDISP = 64 }; + enum { DEFAULT_ITERS = 5 }; + enum { DEFAULT_LEVELS = 5 }; + + static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels); + + //! the default constructor + explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP, + int iters = DEFAULT_ITERS, + int levels = DEFAULT_LEVELS, + int msg_type = CV_32F); + + //! the full constructor taking the number of disparities, number of BP iterations on each level, + //! number of levels, truncation of data cost, data weight, + //! truncation of discontinuity cost and discontinuity single jump + //! DataTerm = data_weight * min(fabs(I2-I1), max_data_term) + //! DiscTerm = min(disc_single_jump * fabs(f1-f2), max_disc_term) + //! please see paper for more details + StereoBeliefPropagation(int ndisp, int iters, int levels, + float max_data_term, float data_weight, + float max_disc_term, float disc_single_jump, + int msg_type = CV_32F); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair, + //! if disparity is empty output type will be CV_16S else output type will be disparity.type(). 
+ void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + + //! version for user specified data term + void operator()(const GpuMat& data, GpuMat& disparity, Stream& stream = Stream::Null()); + + int ndisp; + + int iters; + int levels; + + float max_data_term; + float data_weight; + float max_disc_term; + float disc_single_jump; + + int msg_type; +private: + GpuMat u, d, l, r, u2, d2, l2, r2; + std::vector datas; + GpuMat out; +}; + +/////////////////////////// StereoConstantSpaceBP /////////////////////////// +// "A Constant-Space Belief Propagation Algorithm for Stereo Matching" +// Qingxiong Yang, Liang Wang, Narendra Ahuja +// http://vision.ai.uiuc.edu/~qyang6/ + +class CV_EXPORTS StereoConstantSpaceBP +{ +public: + enum { DEFAULT_NDISP = 128 }; + enum { DEFAULT_ITERS = 8 }; + enum { DEFAULT_LEVELS = 4 }; + enum { DEFAULT_NR_PLANE = 4 }; + + static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane); + + //! the default constructor + explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, + int iters = DEFAULT_ITERS, + int levels = DEFAULT_LEVELS, + int nr_plane = DEFAULT_NR_PLANE, + int msg_type = CV_32F); + + //! the full constructor taking the number of disparities, number of BP iterations on each level, + //! number of levels, number of active disparity on the first level, truncation of data cost, data weight, + //! truncation of discontinuity cost, discontinuity single jump and minimum disparity threshold + StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, + float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, + int min_disp_th = 0, + int msg_type = CV_32F); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair, + //! if disparity is empty output type will be CV_16S else output type will be disparity.type(). 
+ void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + int ndisp; + + int iters; + int levels; + + int nr_plane; + + float max_data_term; + float data_weight; + float max_disc_term; + float disc_single_jump; + + int min_disp_th; + + int msg_type; + + bool use_local_init_data_cost; +private: + GpuMat messages_buffers; + + GpuMat temp; + GpuMat out; +}; + +/////////////////////////// DisparityBilateralFilter /////////////////////////// +// Disparity map refinement using joint bilateral filtering given a single color image. +// Qingxiong Yang, Liang Wang, Narendra Ahuja +// http://vision.ai.uiuc.edu/~qyang6/ + +class CV_EXPORTS DisparityBilateralFilter +{ +public: + enum { DEFAULT_NDISP = 64 }; + enum { DEFAULT_RADIUS = 3 }; + enum { DEFAULT_ITERS = 1 }; + + //! the default constructor + explicit DisparityBilateralFilter(int ndisp = DEFAULT_NDISP, int radius = DEFAULT_RADIUS, int iters = DEFAULT_ITERS); + + //! the full constructor taking the number of disparities, filter radius, + //! number of iterations, truncation of data continuity, truncation of disparity continuity + //! and filter range sigma + DisparityBilateralFilter(int ndisp, int radius, int iters, float edge_threshold, float max_disc_threshold, float sigma_range); + + //! the disparity map refinement operator. Refine disparity map using joint bilateral filtering given a single color image. + //! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type. 
+ void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream = Stream::Null()); + +private: + int ndisp; + int radius; + int iters; + + float edge_threshold; + float max_disc_threshold; + float sigma_range; + + GpuMat table_color; + GpuMat table_space; +}; + + +//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// +struct CV_EXPORTS HOGConfidence +{ + double scale; + vector locations; + vector confidences; + vector part_scores[4]; +}; + +struct CV_EXPORTS HOGDescriptor +{ + enum { DEFAULT_WIN_SIGMA = -1 }; + enum { DEFAULT_NLEVELS = 64 }; + enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL }; + + HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16), + Size block_stride=Size(8, 8), Size cell_size=Size(8, 8), + int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA, + double threshold_L2hys=0.2, bool gamma_correction=true, + int nlevels=DEFAULT_NLEVELS); + + size_t getDescriptorSize() const; + size_t getBlockHistogramSize() const; + + void setSVMDetector(const vector& detector); + + static vector getDefaultPeopleDetector(); + static vector getPeopleDetector48x96(); + static vector getPeopleDetector64x128(); + + void detect(const GpuMat& img, vector& found_locations, + double hit_threshold=0, Size win_stride=Size(), + Size padding=Size()); + + void detectMultiScale(const GpuMat& img, vector& found_locations, + double hit_threshold=0, Size win_stride=Size(), + Size padding=Size(), double scale0=1.05, + int group_threshold=2); + + void computeConfidence(const GpuMat& img, vector& hits, double hit_threshold, + Size win_stride, Size padding, vector& locations, vector& confidences); + + void computeConfidenceMultiScale(const GpuMat& img, vector& found_locations, + double hit_threshold, Size win_stride, Size padding, + vector &conf_out, int group_threshold); + + void getDescriptors(const GpuMat& img, Size win_stride, + GpuMat& descriptors, + int 
descr_format=DESCR_FORMAT_COL_BY_COL); + + Size win_size; + Size block_size; + Size block_stride; + Size cell_size; + int nbins; + double win_sigma; + double threshold_L2hys; + bool gamma_correction; + int nlevels; + +protected: + void computeBlockHistograms(const GpuMat& img); + void computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle); + + double getWinSigma() const; + bool checkDetectorSize() const; + + static int numPartsWithin(int size, int part_size, int stride); + static Size numPartsWithin(Size size, Size part_size, Size stride); + + // Coefficients of the separating plane + float free_coef; + GpuMat detector; + + // Results of the last classification step + GpuMat labels, labels_buf; + Mat labels_host; + + // Results of the last histogram evaluation step + GpuMat block_hists, block_hists_buf; + + // Gradients conputation results + GpuMat grad, qangle, grad_buf, qangle_buf; + + // returns subbuffer with required size, reallocates buffer if nessesary. + static GpuMat getBuffer(const Size& sz, int type, GpuMat& buf); + static GpuMat getBuffer(int rows, int cols, int type, GpuMat& buf); + + std::vector image_scales; +}; + + +////////////////////////////////// BruteForceMatcher ////////////////////////////////// + +class CV_EXPORTS BruteForceMatcher_GPU_base +{ +public: + enum DistType {L1Dist = 0, L2Dist, HammingDist}; + + explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist); + + // Add descriptors to train descriptor collection + void add(const std::vector& descCollection); + + // Get train descriptors collection + const std::vector& getTrainDescriptors() const; + + // Clear train descriptors collection + void clear(); + + // Return true if there are not train descriptors in collection + bool empty() const; + + // Return true if the matcher supports mask in match methods + bool isMaskSupported() const; + + // Find one best match for each query descriptor + void matchSingle(const GpuMat& query, const GpuMat& train, + GpuMat& trainIdx, 
GpuMat& distance, + const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx and distance and convert it to CPU vector with DMatch + static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector& matches); + // Convert trainIdx and distance to vector with DMatch + static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector& matches); + + // Find one best match for each query descriptor + void match(const GpuMat& query, const GpuMat& train, std::vector& matches, const GpuMat& mask = GpuMat()); + + // Make gpu collection of trains and masks in suitable format for matchCollection function + void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector& masks = std::vector()); + + // Find one best match from train collection for each query descriptor + void matchCollection(const GpuMat& query, const GpuMat& trainCollection, + GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, + const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx, imgIdx and distance and convert it to vector with DMatch + static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector& matches); + // Convert trainIdx, imgIdx and distance to vector with DMatch + static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector& matches); + + // Find one best match from train collection for each query descriptor. 
+ void match(const GpuMat& query, std::vector& matches, const std::vector& masks = std::vector()); + + // Find k best matches for each query descriptor (in increasing order of distances) + void knnMatchSingle(const GpuMat& query, const GpuMat& train, + GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, + const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx and distance and convert it to vector with DMatch + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, + std::vector< std::vector >& matches, bool compactResult = false); + // Convert trainIdx and distance to vector with DMatch + static void knnMatchConvert(const Mat& trainIdx, const Mat& distance, + std::vector< std::vector >& matches, bool compactResult = false); + + // Find k best matches for each query descriptor (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. 
+ void knnMatch(const GpuMat& query, const GpuMat& train, + std::vector< std::vector >& matches, int k, const GpuMat& mask = GpuMat(), + bool compactResult = false); + + // Find k best matches from train collection for each query descriptor (in increasing order of distances) + void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection, + GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, + const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx and distance and convert it to vector with DMatch + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, + std::vector< std::vector >& matches, bool compactResult = false); + // Convert trainIdx and distance to vector with DMatch + static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, + std::vector< std::vector >& matches, bool compactResult = false); + + // Find k best matches for each query descriptor (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + void knnMatch(const GpuMat& query, std::vector< std::vector >& matches, int k, + const std::vector& masks = std::vector(), bool compactResult = false); + + // Find best matches for each query descriptor which have distance less than maxDistance. + // nMatches.at(0, queryIdx) will contain matches count for queryIdx. 
+ // carefully nMatches can be greater than trainIdx.cols - it means that matcher didn't find all matches, + // because it didn't have enough memory. + // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10), + // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches + // Matches doesn't sorted. + void radiusMatchSingle(const GpuMat& query, const GpuMat& train, + GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance, + const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx, nMatches and distance and convert it to vector with DMatch. + // matches will be sorted in increasing order of distances. + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches, + std::vector< std::vector >& matches, bool compactResult = false); + // Convert trainIdx, nMatches and distance to vector with DMatch. + static void radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches, + std::vector< std::vector >& matches, bool compactResult = false); + + // Find best matches for each query descriptor which have distance less than maxDistance + // in increasing order of distances). + void radiusMatch(const GpuMat& query, const GpuMat& train, + std::vector< std::vector >& matches, float maxDistance, + const GpuMat& mask = GpuMat(), bool compactResult = false); + + // Find best matches for each query descriptor which have distance less than maxDistance. 
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10), + // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches + // Matches doesn't sorted. + void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance, + const std::vector& masks = std::vector(), Stream& stream = Stream::Null()); + + // Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch. + // matches will be sorted in increasing order of distances. + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches, + std::vector< std::vector >& matches, bool compactResult = false); + // Convert trainIdx, nMatches and distance to vector with DMatch. + static void radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches, + std::vector< std::vector >& matches, bool compactResult = false); + + // Find best matches from train collection for each query descriptor which have distance less than + // maxDistance (in increasing order of distances). 
+ void radiusMatch(const GpuMat& query, std::vector< std::vector >& matches, float maxDistance, + const std::vector& masks = std::vector(), bool compactResult = false); + + DistType distType; + +private: + std::vector trainDescCollection; +}; + +template +class CV_EXPORTS BruteForceMatcher_GPU; + +template +class CV_EXPORTS BruteForceMatcher_GPU< L1 > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L1Dist) {} + explicit BruteForceMatcher_GPU(L1 /*d*/) : BruteForceMatcher_GPU_base(L1Dist) {} +}; +template +class CV_EXPORTS BruteForceMatcher_GPU< L2 > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {} + explicit BruteForceMatcher_GPU(L2 /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {} +}; +template <> class CV_EXPORTS BruteForceMatcher_GPU< Hamming > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(HammingDist) {} + explicit BruteForceMatcher_GPU(Hamming /*d*/) : BruteForceMatcher_GPU_base(HammingDist) {} +}; + +////////////////////////////////// CascadeClassifier_GPU ////////////////////////////////////////// +// The cascade classifier class for object detection: supports old haar and new lbp xlm formats and nvbin for haar cascades olny. 
+class CV_EXPORTS CascadeClassifier_GPU +{ +public: + CascadeClassifier_GPU(); + CascadeClassifier_GPU(const std::string& filename); + ~CascadeClassifier_GPU(); + + bool empty() const; + bool load(const std::string& filename); + void release(); + + /* returns number of detected objects */ + int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.1, int minNeighbors = 4, Size minSize = Size()); + + bool findLargestObject; + bool visualizeInPlace; + + Size getClassifierSize() const; + +private: + + struct CascadeClassifierImpl; + CascadeClassifierImpl* impl; + struct HaarCascade; + struct LbpCascade; + friend class CascadeClassifier_GPU_LBP; +}; + +////////////////////////////////// SURF ////////////////////////////////////////// + +class CV_EXPORTS SURF_GPU +{ +public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_GPU(); + //! the full constructor taking all the necessary parameters + explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4, + int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false); + + //! returns the descriptor size in float's (64 or 128) + int descriptorSize() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const vector& keypoints, GpuMat& keypointsGPU); + //! download keypoints from device to host memory + void downloadKeypoints(const GpuMat& keypointsGPU, vector& keypoints); + + //! download descriptors from device to host memory + void downloadDescriptors(const GpuMat& descriptorsGPU, vector& descriptors); + + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! 
keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints); + //! finds the keypoints and computes their descriptors. + //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, std::vector& descriptors, + bool useProvidedKeypoints = false); + + void releaseMemory(); + + // SURF parameters + double hessianThreshold; + int nOctaves; + int nOctaveLayers; + bool extended; + bool upright; + + //! max keypoints = min(keypointsRatio * img.size().area(), 65535) + float keypointsRatio; + + GpuMat sum, mask1, maskSum, intBuffer; + + GpuMat det, trace; + + GpuMat maxPosBuffer; +}; + +////////////////////////////////// FAST ////////////////////////////////////////// + +class CV_EXPORTS FAST_GPU +{ +public: + enum + { + LOCATION_ROW = 0, + RESPONSE_ROW, + ROWS_COUNT + }; + + // all features have same size + static const int FEATURE_SIZE = 7; + + explicit FAST_GPU(int threshold, bool nonmaxSupression = true, double keypointsRatio = 0.05); + + //! finds the keypoints using FAST detector + //! 
supports only CV_8UC1 images + void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints); + void operator ()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints); + + //! download keypoints from device to host memory + void downloadKeypoints(const GpuMat& d_keypoints, std::vector& keypoints); + + //! convert keypoints to KeyPoint vector + void convertKeypoints(const Mat& h_keypoints, std::vector& keypoints); + + //! release temporary buffer's memory + void release(); + + bool nonmaxSupression; + + int threshold; + + //! max keypoints = keypointsRatio * img.size().area() + double keypointsRatio; + + //! find keypoints and compute it's response if nonmaxSupression is true + //! return count of detected keypoints + int calcKeyPointsLocation(const GpuMat& image, const GpuMat& mask); + + //! get final array of keypoints + //! performs nonmax supression if needed + //! return final count of keypoints + int getKeyPoints(GpuMat& keypoints); + +private: + GpuMat kpLoc_; + int count_; + + GpuMat score_; + + GpuMat d_keypoints_; +}; + +////////////////////////////////// ORB ////////////////////////////////////////// + +class CV_EXPORTS ORB_GPU +{ +public: + enum + { + X_ROW = 0, + Y_ROW, + RESPONSE_ROW, + ANGLE_ROW, + OCTAVE_ROW, + SIZE_ROW, + ROWS_COUNT + }; + + enum + { + DEFAULT_FAST_THRESHOLD = 20 + }; + + //! Constructor + explicit ORB_GPU(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31, + int firstLevel = 0, int WTA_K = 2, int scoreType = 0, int patchSize = 31); + + //! Compute the ORB features on an image + //! image - the image to compute the features (supports only CV_8UC1 images) + //! mask - the mask to apply + //! keypoints - the resulting keypoints + void operator()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints); + + //! Compute the ORB features and descriptors on an image + //! 
image - the image to compute the features (supports only CV_8UC1 images) + //! mask - the mask to apply + //! keypoints - the resulting keypoints + //! descriptors - descriptors array + void operator()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors); + void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors); + + //! download keypoints from device to host memory + void downloadKeyPoints(GpuMat& d_keypoints, std::vector& keypoints); + + //! convert keypoints to KeyPoint vector + void convertKeyPoints(Mat& d_keypoints, std::vector& keypoints); + + //! returns the descriptor size in bytes + inline int descriptorSize() const { return kBytes; } + + inline void setFastParams(int threshold, bool nonmaxSupression = true) + { + fastDetector_.threshold = threshold; + fastDetector_.nonmaxSupression = nonmaxSupression; + } + + //! release temporary buffer's memory + void release(); + + //! if true, image will be blurred before descriptors calculation + bool blurForDescriptor; + +private: + enum { kBytes = 32 }; + + void buildScalePyramids(const GpuMat& image, const GpuMat& mask); + + void computeKeyPointsPyramid(); + + void computeDescriptors(GpuMat& descriptors); + + void mergeKeyPoints(GpuMat& keypoints); + + int nFeatures_; + float scaleFactor_; + int nLevels_; + int edgeThreshold_; + int firstLevel_; + int WTA_K_; + int scoreType_; + int patchSize_; + + // The number of desired features per scale + std::vector n_features_per_level_; + + // Points to compute BRIEF descriptors from + GpuMat pattern_; + + std::vector imagePyr_; + std::vector maskPyr_; + + GpuMat buf_; + + std::vector keyPointsPyr_; + std::vector keyPointsCount_; + + FAST_GPU fastDetector_; + + Ptr blurFilter; + + GpuMat d_keypoints_; +}; + +////////////////////////////////// Optical Flow ////////////////////////////////////////// + +class CV_EXPORTS BroxOpticalFlow +{ +public: + BroxOpticalFlow(float alpha_, float gamma_, float 
scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) : + alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_), + inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_) + { + } + + //! Compute optical flow + //! frame0 - source frame (supports only CV_32FC1 type) + //! frame1 - frame to track (with the same size and type as frame0) + //! u - flow horizontal component (along x axis) + //! v - flow vertical component (along y axis) + void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null()); + + //! flow smoothness + float alpha; + + //! gradient constancy importance + float gamma; + + //! pyramid scale factor + float scale_factor; + + //! number of lagged non-linearity iterations (inner loop) + int inner_iterations; + + //! number of warping iterations (number of pyramid levels) + int outer_iterations; + + //! number of linear system solver iterations + int solver_iterations; + + GpuMat buf; +}; + +class CV_EXPORTS GoodFeaturesToTrackDetector_GPU +{ +public: + explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0, + int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04); + + //! 
return 1 rows matrix with CV_32FC2 type + void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat()); + + int maxCorners; + double qualityLevel; + double minDistance; + + int blockSize; + bool useHarrisDetector; + double harrisK; + + void releaseMemory() + { + Dx_.release(); + Dy_.release(); + buf_.release(); + eig_.release(); + minMaxbuf_.release(); + tmpCorners_.release(); + } + +private: + GpuMat Dx_; + GpuMat Dy_; + GpuMat buf_; + GpuMat eig_; + GpuMat minMaxbuf_; + GpuMat tmpCorners_; +}; + +inline GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxCorners_, double qualityLevel_, double minDistance_, + int blockSize_, bool useHarrisDetector_, double harrisK_) +{ + maxCorners = maxCorners_; + qualityLevel = qualityLevel_; + minDistance = minDistance_; + blockSize = blockSize_; + useHarrisDetector = useHarrisDetector_; + harrisK = harrisK_; +} + + +class CV_EXPORTS PyrLKOpticalFlow +{ +public: + PyrLKOpticalFlow() + { + winSize = Size(21, 21); + maxLevel = 3; + iters = 30; + derivLambda = 0.5; + useInitialFlow = false; + minEigThreshold = 1e-4f; + getMinEigenVals = false; + isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12); + } + + void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, + GpuMat& status, GpuMat* err = 0); + + void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0); + + Size winSize; + int maxLevel; + int iters; + double derivLambda; + bool useInitialFlow; + float minEigThreshold; + bool getMinEigenVals; + + void releaseMemory() + { + dx_calcBuf_.release(); + dy_calcBuf_.release(); + + prevPyr_.clear(); + nextPyr_.clear(); + + dx_buf_.release(); + dy_buf_.release(); + + uPyr_.clear(); + vPyr_.clear(); + } + +private: + void calcSharrDeriv(const GpuMat& src, GpuMat& dx, GpuMat& dy); + + void buildImagePyramid(const GpuMat& img0, vector& pyr, bool withBorder); + + GpuMat dx_calcBuf_; + GpuMat dy_calcBuf_; 
+ + vector prevPyr_; + vector nextPyr_; + + GpuMat dx_buf_; + GpuMat dy_buf_; + + vector uPyr_; + vector vPyr_; + + bool isDeviceArch11_; +}; + + +class CV_EXPORTS FarnebackOpticalFlow +{ +public: + FarnebackOpticalFlow() + { + numLevels = 5; + pyrScale = 0.5; + fastPyramids = false; + winSize = 13; + numIters = 10; + polyN = 5; + polySigma = 1.1; + flags = 0; + isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12); + } + + int numLevels; + double pyrScale; + bool fastPyramids; + int winSize; + int numIters; + int polyN; + double polySigma; + int flags; + + void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null()); + + void releaseMemory() + { + frames_[0].release(); + frames_[1].release(); + pyrLevel_[0].release(); + pyrLevel_[1].release(); + M_.release(); + bufM_.release(); + R_[0].release(); + R_[1].release(); + blurredFrame_[0].release(); + blurredFrame_[1].release(); + pyramid0_.clear(); + pyramid1_.clear(); + } + +private: + void prepareGaussian( + int n, double sigma, float *g, float *xg, float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55); + + void setPolynomialExpansionConsts(int n, double sigma); + + void updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + void updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + GpuMat frames_[2]; + GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; + std::vector pyramid0_, pyramid1_; + + bool isDeviceArch11_; +}; + + +//! Interpolate frames (images) using provided optical flow (displacement field). +//! frame0 - frame 0 (32-bit floating point images, single channel) +//! frame1 - frame 1 (the same type and size) +//! fu - forward horizontal displacement +//! 
fv - forward vertical displacement +//! bu - backward horizontal displacement +//! bv - backward vertical displacement +//! pos - new frame position +//! newFrame - new frame +//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat; +//! occlusion masks 0, occlusion masks 1, +//! interpolated forward flow 0, interpolated forward flow 1, +//! interpolated backward flow 0, interpolated backward flow 1 +//! +CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, + const GpuMat& fu, const GpuMat& fv, + const GpuMat& bu, const GpuMat& bv, + float pos, GpuMat& newFrame, GpuMat& buf, + Stream& stream = Stream::Null()); + +CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); + + +//////////////////////// Background/foreground segmentation //////////////////////// + +// Foreground Object Detection from Videos Containing Complex Background. +// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. +// ACM MM2003 9p +class CV_EXPORTS FGDStatModel +{ +public: + struct CV_EXPORTS Params + { + int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. + int N1c; // Number of color vectors used to model normal background color variation at a given pixel. + int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. + // Used to allow the first N1c vectors to adapt over time to changing background. + + int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. + int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel. + int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. + // Used to allow the first N1cc vectors to adapt over time to changing background. 
+ + bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE. + int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations. + // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. + + float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1. + float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005. + float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. + + float delta; // Affects color and color co-occurrence quantization, typically set to 2. + float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9). + float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold. + + // default Params + Params(); + }; + + // out_cn - channels count in output result (can be 3 or 4) + // 4-channels require more memory, but a bit faster + explicit FGDStatModel(int out_cn = 3); + explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3); + + ~FGDStatModel(); + + void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params()); + void release(); + + int update(const cv::gpu::GpuMat& curFrame); + + //8UC3 or 8UC4 reference background image + cv::gpu::GpuMat background; + + //8UC1 foreground image + cv::gpu::GpuMat foreground; + + std::vector< std::vector > foreground_regions; + +private: + FGDStatModel(const FGDStatModel&); + FGDStatModel& operator=(const FGDStatModel&); + + class Impl; + std::auto_ptr impl_; +}; + +/*! + Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm + + The class implements the following algorithm: + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 
2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001." + http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf +*/ +class CV_EXPORTS MOG_GPU +{ +public: + //! the default constructor + MOG_GPU(int nmixtures = -1); + + //! re-initiaization method + void initialize(Size frameSize, int frameType); + + //! the update operator + void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null()); + + //! computes a background image which are the mean of all background gaussians + void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const; + + //! releases all inner buffers + void release(); + + int history; + float varThreshold; + float backgroundRatio; + float noiseSigma; + +private: + int nmixtures_; + + Size frameSize_; + int frameType_; + int nframes_; + + GpuMat weight_; + GpuMat sortKey_; + GpuMat mean_; + GpuMat var_; +}; + +/*! + The class implements the following algorithm: + "Improved adaptive Gausian mixture model for background subtraction" + Z.Zivkovic + International Conference Pattern Recognition, UK, August, 2004. + http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf +*/ +class CV_EXPORTS MOG2_GPU +{ +public: + //! the default constructor + MOG2_GPU(int nmixtures = -1); + + //! re-initiaization method + void initialize(Size frameSize, int frameType); + + //! the update operator + void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null()); + + //! computes a background image which are the mean of all background gaussians + void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const; + + //! releases all inner buffers + void release(); + + // parameters + // you should call initialize after parameters changes + + int history; + + //! here it is the maximum allowed number of mixture components. + //! 
Actual number is determined dynamically per pixel + float varThreshold; + // threshold on the squared Mahalanobis distance to decide if it is well described + // by the background model or not. Related to Cthr from the paper. + // This does not influence the update of the background. A typical value could be 4 sigma + // and that is varThreshold=4*4=16; Corresponds to Tb in the paper. + + ///////////////////////// + // less important parameters - things you might change but be carefull + //////////////////////// + + float backgroundRatio; + // corresponds to fTB=1-cf from the paper + // TB - threshold when the component becomes significant enough to be included into + // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0. + // For alpha=0.001 it means that the mode should exist for approximately 105 frames before + // it is considered foreground + // float noiseSigma; + float varThresholdGen; + + //correspondts to Tg - threshold on the squared Mahalan. dist. to decide + //when a sample is close to the existing components. If it is not close + //to any a new component will be generated. I use 3 sigma => Tg=3*3=9. + //Smaller Tg leads to more generated components and higher Tg might make + //lead to small number of components but they can grow too large + float fVarInit; + float fVarMin; + float fVarMax; + + //initial variance for the newly generated components. + //It will will influence the speed of adaptation. A good guess should be made. + //A simple way is to estimate the typical standard deviation from the images. + //I used here 10 as a reasonable value + // min and max can be used to further control the variance + float fCT; //CT - complexity reduction prior + //this is related to the number of samples needed to accept that a component + //actually exists. We use CT=0.05 of all the samples. 
By setting CT=0 you get + //the standard Stauffer&Grimson algorithm (maybe not exact but very similar) + + //shadow detection parameters + bool bShadowDetection; //default 1 - do shadow detection + unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value + float fTau; + // Tau - shadow threshold. The shadow is detected if the pixel is darker + //version of the background. Tau is a threshold on how much darker the shadow can be. + //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow + //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003. + +private: + int nmixtures_; + + Size frameSize_; + int frameType_; + int nframes_; + + GpuMat weight_; + GpuMat variance_; + GpuMat mean_; + + GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel +}; + +/*! + * The class implements the following algorithm: + * "ViBe: A universal background subtraction algorithm for video sequences" + * O. Barnich and M. Van D Roogenbroeck + * IEEE Transactions on Image Processing, 20(6) :1709-1724, June 2011 + */ +class CV_EXPORTS VIBE_GPU +{ +public: + //! the default constructor + explicit VIBE_GPU(unsigned long rngSeed = 1234567); + + //! re-initiaization method + void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null()); + + //! the update operator + void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null()); + + //! releases all inner buffers + void release(); + + int nbSamples; // number of samples per pixel + int reqMatches; // #_min + int radius; // R + int subsamplingFactor; // amount of random subsampling + +private: + Size frameSize_; + + unsigned long rngSeed_; + GpuMat randStates_; + + GpuMat samples_; +}; + +/** + * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1) + * images of the same size, where 255 indicates Foreground and 0 represents Background. 
+ * This class implements an algorithm described in "Visual Tracking of Human Visitors under + * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere, + * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012. + */ +class CV_EXPORTS GMG_GPU +{ +public: + GMG_GPU(); + + /** + * Validate parameters and set up data structures for appropriate frame size. + * @param frameSize Input frame size + * @param min Minimum value taken on by pixels in image sequence. Usually 0 + * @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255 + */ + void initialize(Size frameSize, float min = 0.0f, float max = 255.0f); + + /** + * Performs single-frame background subtraction and builds up a statistical background image + * model. + * @param frame Input frame + * @param fgmask Output mask image representing foreground and background pixels + * @param stream Stream for the asynchronous version + */ + void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null()); + + //! Releases all inner buffers + void release(); + + //! Total number of distinct colors to maintain in histogram. + int maxFeatures; + + //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms. + float learningRate; + + //! Number of frames of video to use to initialize histograms. + int numInitializationFrames; + + //! Number of discrete levels in each channel to be used in histograms. + int quantizationLevels; + + //! Prior probability that any given pixel is a background pixel. A sensitivity parameter. + float backgroundPrior; + + //! Value above which pixel is determined to be FG. + float decisionThreshold; + + //! Smoothing radius, in pixels, for cleaning up FG image. + int smoothingRadius; + + //! Perform background model update. 
+ bool updateBackgroundModel; + +private: + float maxVal_, minVal_; + + Size frameSize_; + + int frameNum_; + + GpuMat nfeatures_; + GpuMat colors_; + GpuMat weights_; + + Ptr boxFilter_; + GpuMat buf_; +}; + +////////////////////////////////// Video Encoding ////////////////////////////////// + +// Works only under Windows +// Supports olny H264 video codec and AVI files +class CV_EXPORTS VideoWriter_GPU +{ +public: + struct EncoderParams; + + // Callbacks for video encoder, use it if you want to work with raw video stream + class EncoderCallBack; + + enum SurfaceFormat + { + SF_UYVY = 0, + SF_YUY2, + SF_YV12, + SF_NV12, + SF_IYUV, + SF_BGR, + SF_GRAY = SF_BGR + }; + + VideoWriter_GPU(); + VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + ~VideoWriter_GPU(); + + // all methods throws cv::Exception if error occurs + void open(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + void open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + void open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + void open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + + bool isOpened() const; + void close(); + + void write(const cv::gpu::GpuMat& image, bool lastFrame = false); + + struct CV_EXPORTS EncoderParams + { + int P_Interval; // NVVE_P_INTERVAL, + int IDR_Period; // NVVE_IDR_PERIOD, + 
int DynamicGOP; // NVVE_DYNAMIC_GOP, + int RCType; // NVVE_RC_TYPE, + int AvgBitrate; // NVVE_AVG_BITRATE, + int PeakBitrate; // NVVE_PEAK_BITRATE, + int QP_Level_Intra; // NVVE_QP_LEVEL_INTRA, + int QP_Level_InterP; // NVVE_QP_LEVEL_INTER_P, + int QP_Level_InterB; // NVVE_QP_LEVEL_INTER_B, + int DeblockMode; // NVVE_DEBLOCK_MODE, + int ProfileLevel; // NVVE_PROFILE_LEVEL, + int ForceIntra; // NVVE_FORCE_INTRA, + int ForceIDR; // NVVE_FORCE_IDR, + int ClearStat; // NVVE_CLEAR_STAT, + int DIMode; // NVVE_SET_DEINTERLACE, + int Presets; // NVVE_PRESETS, + int DisableCabac; // NVVE_DISABLE_CABAC, + int NaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE + int DisableSPSPPS; // NVVE_DISABLE_SPS_PPS + + EncoderParams(); + explicit EncoderParams(const std::string& configFile); + + void load(const std::string& configFile); + void save(const std::string& configFile) const; + }; + + EncoderParams getParams() const; + + class CV_EXPORTS EncoderCallBack + { + public: + enum PicType + { + IFRAME = 1, + PFRAME = 2, + BFRAME = 3 + }; + + virtual ~EncoderCallBack() {} + + // callback function to signal the start of bitstream that is to be encoded + // must return pointer to buffer + virtual uchar* acquireBitStream(int* bufferSize) = 0; + + // callback function to signal that the encoded bitstream is ready to be written to file + virtual void releaseBitStream(unsigned char* data, int size) = 0; + + // callback function to signal that the encoding operation on the frame has started + virtual void onBeginFrame(int frameNumber, PicType picType) = 0; + + // callback function signals that the encoding operation on the frame has finished + virtual void onEndFrame(int frameNumber, PicType picType) = 0; + }; + +private: + VideoWriter_GPU(const VideoWriter_GPU&); + VideoWriter_GPU& operator=(const VideoWriter_GPU&); + + class Impl; + std::auto_ptr impl_; +}; + + +////////////////////////////////// Video Decoding ////////////////////////////////////////// + +namespace detail +{ + class 
FrameQueue; + class VideoParser; +} + +class CV_EXPORTS VideoReader_GPU +{ +public: + enum Codec + { + MPEG1 = 0, + MPEG2, + MPEG4, + VC1, + H264, + JPEG, + H264_SVC, + H264_MVC, + + Uncompressed_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0) + Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0) + Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0) + Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2) + Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')), // UYVY (4:2:2) + }; + + enum ChromaFormat + { + Monochrome=0, + YUV420, + YUV422, + YUV444, + }; + + struct FormatInfo + { + Codec codec; + ChromaFormat chromaFormat; + int width; + int height; + }; + + class VideoSource; + + VideoReader_GPU(); + explicit VideoReader_GPU(const std::string& filename); + explicit VideoReader_GPU(const cv::Ptr& source); + + ~VideoReader_GPU(); + + void open(const std::string& filename); + void open(const cv::Ptr& source); + bool isOpened() const; + + void close(); + + bool read(GpuMat& image); + + FormatInfo format() const; + void dumpFormat(std::ostream& st); + + class CV_EXPORTS VideoSource + { + public: + VideoSource() : frameQueue_(0), videoParser_(0) {} + virtual ~VideoSource() {} + + virtual FormatInfo format() const = 0; + virtual void start() = 0; + virtual void stop() = 0; + virtual bool isStarted() const = 0; + virtual bool hasError() const = 0; + + void setFrameQueue(detail::FrameQueue* frameQueue) { frameQueue_ = frameQueue; } + void setVideoParser(detail::VideoParser* videoParser) { videoParser_ = videoParser; } + + protected: + bool parseVideoData(const uchar* data, size_t size, bool endOfStream = false); + + private: + VideoSource(const VideoSource&); + VideoSource& operator =(const VideoSource&); + + detail::FrameQueue* frameQueue_; + detail::VideoParser* videoParser_; + }; + +private: + VideoReader_GPU(const VideoReader_GPU&); + VideoReader_GPU& operator =(const 
VideoReader_GPU&); + + class Impl; + std::auto_ptr impl_; +}; + +//! removes points (CV_32FC2, single row matrix) with zero mask value +CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask); + +CV_EXPORTS void calcWobbleSuppressionMaps( + int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, + GpuMat &mapx, GpuMat &mapy); + +} // namespace gpu + +} // namespace cv + +#endif /* __OPENCV_GPU_HPP__ */ diff --git a/OpenCV/Headers/gpu/gpumat.hpp b/OpenCV/Headers/gpu/gpumat.hpp new file mode 100644 index 0000000000..0033cbe961 --- /dev/null +++ b/OpenCV/Headers/gpu/gpumat.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/gpumat.hpp" diff --git a/OpenCV/Headers/gpu/stream_accessor.hpp b/OpenCV/Headers/gpu/stream_accessor.hpp new file mode 100644 index 0000000000..6a1a0bddd5 --- /dev/null +++ b/OpenCV/Headers/gpu/stream_accessor.hpp @@ -0,0 +1,64 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other GpuMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_STREAM_ACCESSOR_HPP__ +#define __OPENCV_GPU_STREAM_ACCESSOR_HPP__ + +#include "opencv2/gpu/gpu.hpp" +#include "cuda_runtime_api.h" + +namespace cv +{ + namespace gpu + { + // This is only header file that depends on Cuda. All other headers are independent. + // So if you use OpenCV binaries you do noot need to install Cuda Toolkit. + // But of you wanna use GPU by yourself, may get cuda stream instance using the class below. + // In this case you have to install Cuda Toolkit. 
+ struct StreamAccessor + { + CV_EXPORTS static cudaStream_t getStream(const Stream& stream); + }; + } +} + +#endif /* __OPENCV_GPU_STREAM_ACCESSOR_HPP__ */ \ No newline at end of file diff --git a/OpenCV/Headers/highgui/cap_ios.h b/OpenCV/Headers/highgui/cap_ios.h new file mode 100644 index 0000000000..5bd5fe3c67 --- /dev/null +++ b/OpenCV/Headers/highgui/cap_ios.h @@ -0,0 +1,163 @@ +/* + * cap_ios.h + * For iOS video I/O + * by Eduard Feicho on 29/07/12 + * Copyright 2012. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#import +#import +#import +#import +#include "opencv2/core/core.hpp" + +/////////////////////////////////////// CvAbstractCamera ///////////////////////////////////// + +@class CvAbstractCamera; + +@interface CvAbstractCamera : NSObject +{ + AVCaptureSession* captureSession; + AVCaptureConnection* videoCaptureConnection; + AVCaptureVideoPreviewLayer *captureVideoPreviewLayer; + + UIDeviceOrientation currentDeviceOrientation; + + BOOL cameraAvailable; + BOOL captureSessionLoaded; + BOOL running; + BOOL useAVCaptureVideoPreviewLayer; + + AVCaptureDevicePosition defaultAVCaptureDevicePosition; + AVCaptureVideoOrientation defaultAVCaptureVideoOrientation; + NSString *const defaultAVCaptureSessionPreset; + + int defaultFPS; + + UIView* parentView; + + int imageWidth; + int imageHeight; +} + +@property (nonatomic, retain) AVCaptureSession* captureSession; +@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection; + +@property (nonatomic, readonly) BOOL running; +@property (nonatomic, readonly) BOOL captureSessionLoaded; + +@property (nonatomic, assign) int defaultFPS; +@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition; +@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation; +@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer; +@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset; + +@property (nonatomic, assign) int imageWidth; +@property (nonatomic, assign) int imageHeight; + +@property (nonatomic, retain) UIView* parentView; + +- (void)start; +- (void)stop; +- (void)switchCameras; + +- (id)initWithParentView:(UIView*)parent; + +- (void)createCaptureOutput; +- (void)createVideoPreviewLayer; +- (void)updateOrientation; + + +@end + +///////////////////////////////// CvVideoCamera /////////////////////////////////////////// + +@class CvVideoCamera; + +@protocol CvVideoCameraDelegate + +#ifdef __cplusplus +// delegate method for processing 
image frames +- (void)processImage:(cv::Mat&)image; +#endif + +@end + +@interface CvVideoCamera : CvAbstractCamera +{ + AVCaptureVideoDataOutput *videoDataOutput; + + dispatch_queue_t videoDataOutputQueue; + CALayer *customPreviewLayer; + + BOOL grayscaleMode; + + BOOL recordVideo; + AVAssetWriterInput* recordAssetWriterInput; + AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor; + AVAssetWriter* recordAssetWriter; + + CMTime lastSampleTime; + +} + +@property (nonatomic, assign) id delegate; +@property (nonatomic, assign) BOOL grayscaleMode; + +@property (nonatomic, assign) BOOL recordVideo; +@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput; +@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor; +@property (nonatomic, retain) AVAssetWriter* recordAssetWriter; + +- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation; +- (void)layoutPreviewLayer; +- (void)saveVideo; +- (NSURL *)videoFileURL; + + +@end + +///////////////////////////////// CvPhotoCamera /////////////////////////////////////////// + +@class CvPhotoCamera; + +@protocol CvPhotoCameraDelegate + +- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image; +- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera; + +@end + +@interface CvPhotoCamera : CvAbstractCamera +{ + AVCaptureStillImageOutput *stillImageOutput; +} + +@property (nonatomic, assign) id delegate; + +- (void)takePicture; + +@end diff --git a/OpenCV/Headers/highgui/highgui.hpp b/OpenCV/Headers/highgui/highgui.hpp new file mode 100644 index 0000000000..57aef6314d --- /dev/null +++ b/OpenCV/Headers/highgui/highgui.hpp @@ -0,0 +1,253 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_HIGHGUI_HPP__ +#define __OPENCV_HIGHGUI_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui_c.h" + +#ifdef __cplusplus + +struct CvCapture; +struct CvVideoWriter; + +namespace cv +{ + +enum { + // Flags for namedWindow + WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed + WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support + + // Flags for set / getWindowProperty + WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property + WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property + WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration + WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support +}; + +CV_EXPORTS_W void namedWindow(const string& winname, int flags = WINDOW_AUTOSIZE); +CV_EXPORTS_W void destroyWindow(const string& winname); +CV_EXPORTS_W void destroyAllWindows(); + +CV_EXPORTS_W int startWindowThread(); + +CV_EXPORTS_W int waitKey(int delay = 0); + +CV_EXPORTS_W void imshow(const string& winname, InputArray mat); + +CV_EXPORTS_W void resizeWindow(const string& winname, int width, int height); +CV_EXPORTS_W void moveWindow(const string& winname, int x, int y); + +CV_EXPORTS_W void setWindowProperty(const string& winname, int prop_id, double prop_value);//YV +CV_EXPORTS_W double getWindowProperty(const string& winname, int prop_id);//YV + +enum +{ + EVENT_MOUSEMOVE =0, + EVENT_LBUTTONDOWN =1, + EVENT_RBUTTONDOWN =2, + EVENT_MBUTTONDOWN =3, + EVENT_LBUTTONUP =4, + EVENT_RBUTTONUP =5, + EVENT_MBUTTONUP =6, + EVENT_LBUTTONDBLCLK =7, + EVENT_RBUTTONDBLCLK =8, + EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + EVENT_FLAG_LBUTTON =1, + EVENT_FLAG_RBUTTON =2, + EVENT_FLAG_MBUTTON =4, + EVENT_FLAG_CTRLKEY =8, + EVENT_FLAG_SHIFTKEY =16, + EVENT_FLAG_ALTKEY 
=32 +}; + +typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); + +//! assigns callback for mouse events +CV_EXPORTS void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0); + + +typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata); + +CV_EXPORTS int createTrackbar(const string& trackbarname, const string& winname, + int* value, int count, + TrackbarCallback onChange = 0, + void* userdata = 0); + +CV_EXPORTS_W int getTrackbarPos(const string& trackbarname, const string& winname); +CV_EXPORTS_W void setTrackbarPos(const string& trackbarname, const string& winname, int pos); + +// OpenGL support + +typedef void (*OpenGlDrawCallback)(void* userdata); +CV_EXPORTS void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0); + +CV_EXPORTS void setOpenGlContext(const string& winname); + +CV_EXPORTS void updateWindow(const string& winname); + +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, const GlArrays& arr); +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, InputArray points, InputArray colors = noArray()); + +//Only for Qt + +CV_EXPORTS CvFont fontQt(const string& nameFont, int pointSize=-1, + Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL, + int style=CV_STYLE_NORMAL, int spacing=0); +CV_EXPORTS void addText( const Mat& img, const string& text, Point org, CvFont font); + +CV_EXPORTS void displayOverlay(const string& winname, const string& text, int delayms CV_DEFAULT(0)); +CV_EXPORTS void displayStatusBar(const string& winname, const string& text, int delayms CV_DEFAULT(0)); + +CV_EXPORTS void saveWindowParameters(const string& windowName); +CV_EXPORTS void loadWindowParameters(const string& windowName); +CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CV_EXPORTS void stopLoop(); + +typedef void (CV_CDECL *ButtonCallback)(int state, void* 
userdata); +CV_EXPORTS int createButton( const string& bar_name, ButtonCallback on_change, + void* userdata=NULL, int type=CV_PUSH_BUTTON, + bool initial_button_state=0); + +//------------------------- + +enum +{ + // 8bit, color or not + IMREAD_UNCHANGED =-1, + // 8bit, gray + IMREAD_GRAYSCALE =0, + // ?, color + IMREAD_COLOR =1, + // any depth, ? + IMREAD_ANYDEPTH =2, + // ?, any color + IMREAD_ANYCOLOR =4 +}; + +enum +{ + IMWRITE_JPEG_QUALITY =1, + IMWRITE_PNG_COMPRESSION =16, + IMWRITE_PNG_STRATEGY =17, + IMWRITE_PNG_BILEVEL =18, + IMWRITE_PNG_STRATEGY_DEFAULT =0, + IMWRITE_PNG_STRATEGY_FILTERED =1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + IMWRITE_PNG_STRATEGY_RLE =3, + IMWRITE_PNG_STRATEGY_FIXED =4, + IMWRITE_PXM_BINARY =32 +}; + +CV_EXPORTS_W Mat imread( const string& filename, int flags=1 ); +CV_EXPORTS_W bool imwrite( const string& filename, InputArray img, + const vector& params=vector()); +CV_EXPORTS_W Mat imdecode( InputArray buf, int flags ); +CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst ); +CV_EXPORTS_W bool imencode( const string& ext, InputArray img, + CV_OUT vector& buf, + const vector& params=vector()); + +#ifndef CV_NO_VIDEO_CAPTURE_CPP_API + +template<> void CV_EXPORTS Ptr::delete_obj(); +template<> void CV_EXPORTS Ptr::delete_obj(); + +class CV_EXPORTS_W VideoCapture +{ +public: + CV_WRAP VideoCapture(); + CV_WRAP VideoCapture(const string& filename); + CV_WRAP VideoCapture(int device); + + virtual ~VideoCapture(); + CV_WRAP virtual bool open(const string& filename); + CV_WRAP virtual bool open(int device); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + + CV_WRAP virtual bool grab(); + CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0); + virtual VideoCapture& operator >> (CV_OUT Mat& image); + CV_WRAP virtual bool read(CV_OUT Mat& image); + + CV_WRAP virtual bool set(int propId, double value); + CV_WRAP virtual double get(int propId); + +protected: + Ptr cap; +}; + + +class 
CV_EXPORTS_W VideoWriter +{ +public: + CV_WRAP VideoWriter(); + CV_WRAP VideoWriter(const string& filename, int fourcc, double fps, + Size frameSize, bool isColor=true); + + virtual ~VideoWriter(); + CV_WRAP virtual bool open(const string& filename, int fourcc, double fps, + Size frameSize, bool isColor=true); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + virtual VideoWriter& operator << (const Mat& image); + CV_WRAP virtual void write(const Mat& image); + +protected: + Ptr writer; +}; + +#endif + +} + +#endif + +#endif diff --git a/OpenCV/Headers/highgui/highgui_c.h b/OpenCV/Headers/highgui/highgui_c.h new file mode 100644 index 0000000000..9c7166fc9a --- /dev/null +++ b/OpenCV/Headers/highgui/highgui_c.h @@ -0,0 +1,619 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_HIGHGUI_H__ +#define __OPENCV_HIGHGUI_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/****************************************************************************************\ +* Basic GUI functions * +\****************************************************************************************/ +//YV +//-----------New for Qt +/* For font */ +enum { CV_FONT_LIGHT = 25,//QFont::Light, + CV_FONT_NORMAL = 50,//QFont::Normal, + CV_FONT_DEMIBOLD = 63,//QFont::DemiBold, + CV_FONT_BOLD = 75,//QFont::Bold, + CV_FONT_BLACK = 87 //QFont::Black +}; + +enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal, + CV_STYLE_ITALIC = 1,//QFont::StyleItalic, + CV_STYLE_OBLIQUE = 2 //QFont::StyleOblique +}; +/* ---------*/ + +//for color cvScalar(blue_component, green_component, red\_component[, alpha_component]) +//and alpha= 0 <-> 0xFF (not transparent <-> transparent) +CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color 
CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0)); + +CVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2); + +CVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0)); +CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0)); + +CVAPI(void) cvSaveWindowParameters(const char* name); +CVAPI(void) cvLoadWindowParameters(const char* name); +CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CVAPI(void) cvStopLoop( void ); + +typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata); +enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2}; +CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0)); +//---------------------- + + +/* this function is used to set some external parameters in case of X Window */ +CVAPI(int) cvInitSystem( int argc, char** argv ); + +CVAPI(int) cvStartWindowThread( void ); + +// --------- YV --------- +enum +{ + //These 3 flags are used by cvSet/GetWindowProperty + CV_WND_PROP_FULLSCREEN = 0, //to change/get window's fullscreen property + CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property + CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property + CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support + + //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrainted by the image displayed + CV_WINDOW_OPENGL = 0x00001000, //window with opengl support + 
+ //Those flags are only for Qt + CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar + CV_GUI_NORMAL = 0x00000010, //old fashious way + + //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen + CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint) + CV_WINDOW_KEEPRATIO = 0x00000000//the ration image is respected. +}; + +/* create window */ +CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) ); + +/* Set and Get Property of the window */ +CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value); +CVAPI(double) cvGetWindowProperty(const char* name, int prop_id); + +/* display image within window (highgui windows remember their content) */ +CVAPI(void) cvShowImage( const char* name, const CvArr* image ); + +/* resize/move window */ +CVAPI(void) cvResizeWindow( const char* name, int width, int height ); +CVAPI(void) cvMoveWindow( const char* name, int x, int y ); + + +/* destroy window and all the trackers associated with it */ +CVAPI(void) cvDestroyWindow( const char* name ); + +CVAPI(void) cvDestroyAllWindows(void); + +/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */ +CVAPI(void*) cvGetWindowHandle( const char* name ); + +/* get name of highgui window given its native handle */ +CVAPI(const char*) cvGetWindowName( void* window_handle ); + + +typedef void (CV_CDECL *CvTrackbarCallback)(int pos); + +/* create trackbar and display it on top of given window, set callback */ +CVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL)); + +typedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata); + +CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback2 on_change, + void* userdata 
CV_DEFAULT(0)); + +/* retrieve or set trackbar position */ +CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name ); +CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ); + +enum +{ + CV_EVENT_MOUSEMOVE =0, + CV_EVENT_LBUTTONDOWN =1, + CV_EVENT_RBUTTONDOWN =2, + CV_EVENT_MBUTTONDOWN =3, + CV_EVENT_LBUTTONUP =4, + CV_EVENT_RBUTTONUP =5, + CV_EVENT_MBUTTONUP =6, + CV_EVENT_LBUTTONDBLCLK =7, + CV_EVENT_RBUTTONDBLCLK =8, + CV_EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + CV_EVENT_FLAG_LBUTTON =1, + CV_EVENT_FLAG_RBUTTON =2, + CV_EVENT_FLAG_MBUTTON =4, + CV_EVENT_FLAG_CTRLKEY =8, + CV_EVENT_FLAG_SHIFTKEY =16, + CV_EVENT_FLAG_ALTKEY =32 +}; + +typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param); + +/* assign callback for mouse events */ +CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, + void* param CV_DEFAULT(NULL)); + +enum +{ +/* 8bit, color or not */ + CV_LOAD_IMAGE_UNCHANGED =-1, +/* 8bit, gray */ + CV_LOAD_IMAGE_GRAYSCALE =0, +/* ?, color */ + CV_LOAD_IMAGE_COLOR =1, +/* any depth, ? 
*/ + CV_LOAD_IMAGE_ANYDEPTH =2, +/* ?, any color */ + CV_LOAD_IMAGE_ANYCOLOR =4 +}; + +/* load image from file + iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED + overrides the other flags + using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED + unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit +*/ +CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +enum +{ + CV_IMWRITE_JPEG_QUALITY =1, + CV_IMWRITE_PNG_COMPRESSION =16, + CV_IMWRITE_PNG_STRATEGY =17, + CV_IMWRITE_PNG_BILEVEL =18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, + CV_IMWRITE_PNG_STRATEGY_FILTERED =1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + CV_IMWRITE_PNG_STRATEGY_RLE =3, + CV_IMWRITE_PNG_STRATEGY_FIXED =4, + CV_IMWRITE_PXM_BINARY =32 +}; + +/* save image to file */ +CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +/* decode image stored in the buffer */ +CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ +CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +enum +{ + CV_CVTIMG_FLIP =1, + CV_CVTIMG_SWAP_RB =2 +}; + +/* utility function: convert one image to another with optional vertical flip */ +CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); + +/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ +CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); + +// OpenGL support + +typedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata); +CVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, 
CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL)); + +CVAPI(void) cvSetOpenGlContext(const char* window_name); +CVAPI(void) cvUpdateWindow(const char* window_name); + + +/****************************************************************************************\ +* Working with Video Files and Cameras * +\****************************************************************************************/ + +/* "black box" capture structure */ +typedef struct CvCapture CvCapture; + +/* start capturing frames from video file */ +CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); + +enum +{ + CV_CAP_ANY =0, // autodetect + + CV_CAP_MIL =100, // MIL proprietary drivers + + CV_CAP_VFW =200, // platform native + CV_CAP_V4L =200, + CV_CAP_V4L2 =200, + + CV_CAP_FIREWARE =300, // IEEE 1394 drivers + CV_CAP_FIREWIRE =300, + CV_CAP_IEEE1394 =300, + CV_CAP_DC1394 =300, + CV_CAP_CMU1394 =300, + + CV_CAP_STEREO =400, // TYZX proprietary drivers + CV_CAP_TYZX =400, + CV_TYZX_LEFT =400, + CV_TYZX_RIGHT =401, + CV_TYZX_COLOR =402, + CV_TYZX_Z =403, + + CV_CAP_QT =500, // QuickTime + + CV_CAP_UNICAP =600, // Unicap drivers + + CV_CAP_DSHOW =700, // DirectShow (via videoInput) + + CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK + + CV_CAP_OPENNI =900, // OpenNI (for Kinect) + CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) + + CV_CAP_ANDROID =1000, // Android + + CV_CAP_XIAPI =1100, // XIMEA Camera API + + CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + + CV_CAP_GIGANETIX = 1300 // Smartek Giganetix GigEVisionSDK +}; + +/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ +CVAPI(CvCapture*) cvCreateCameraCapture( int index ); + +/* grab a frame, return 1 on success, 0 on fail. + this function is thought to be fast */ +CVAPI(int) cvGrabFrame( CvCapture* capture ); + +/* get the frame grabbed with cvGrabFrame(..) 
+ This function may apply some frame processing like + frame decompression, flipping etc. + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); + +/* Just a combination of cvGrabFrame and cvRetrieveFrame + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); + +/* stop capturing/reading and free resources */ +CVAPI(void) cvReleaseCapture( CvCapture** capture ); + +enum +{ + // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) + // every feature can have only one mode turned on at a time + CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CV_CAP_PROP_DC1394_MODE_AUTO = -2, + CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CV_CAP_PROP_POS_MSEC =0, + CV_CAP_PROP_POS_FRAMES =1, + CV_CAP_PROP_POS_AVI_RATIO =2, + CV_CAP_PROP_FRAME_WIDTH =3, + CV_CAP_PROP_FRAME_HEIGHT =4, + CV_CAP_PROP_FPS =5, + CV_CAP_PROP_FOURCC =6, + CV_CAP_PROP_FRAME_COUNT =7, + CV_CAP_PROP_FORMAT =8, + CV_CAP_PROP_MODE =9, + CV_CAP_PROP_BRIGHTNESS =10, + CV_CAP_PROP_CONTRAST =11, + CV_CAP_PROP_SATURATION =12, + CV_CAP_PROP_HUE =13, + CV_CAP_PROP_GAIN =14, + CV_CAP_PROP_EXPOSURE =15, + CV_CAP_PROP_CONVERT_RGB =16, + CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CV_CAP_PROP_RECTIFICATION =18, + CV_CAP_PROP_MONOCROME =19, + CV_CAP_PROP_SHARPNESS =20, + CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, + // user can adjust refernce level + // using this feature + CV_CAP_PROP_GAMMA =22, + CV_CAP_PROP_TEMPERATURE =23, + CV_CAP_PROP_TRIGGER =24, + CV_CAP_PROP_TRIGGER_DELAY =25, + CV_CAP_PROP_WHITE_BALANCE_RED_V =26, + CV_CAP_PROP_ZOOM =27, + CV_CAP_PROP_FOCUS =28, + CV_CAP_PROP_GUID =29, + CV_CAP_PROP_ISO_SPEED =30, + 
CV_CAP_PROP_MAX_DC1394 =31, + CV_CAP_PROP_BACKLIGHT =32, + CV_CAP_PROP_PAN =33, + CV_CAP_PROP_TILT =34, + CV_CAP_PROP_ROLL =35, + CV_CAP_PROP_IRIS =36, + CV_CAP_PROP_SETTINGS =37, + + CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only + CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed + CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed + + // OpenNI map generators + CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR, + + // Properties of cameras available through OpenNI interfaces + CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm + CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag + CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). 
+ CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + + CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, + CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, + CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + + // Properties of cameras available through GStreamer interface + CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 + CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + + // Properties of cameras available through XIMEA SDK interface + CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. 
+ CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). + CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds + + // Properties for Android cameras + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + + // Properties of cameras available through AVFOUNDATION interface + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005 + + // Properties of cameras available through Smartek Giganetix Ethernet Vision interface + /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ + ,CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 
10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006 +}; + +enum +{ + // Data given from depth generator. + CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator. + CV_CAP_OPENNI_BGR_IMAGE = 5, + CV_CAP_OPENNI_GRAY_IMAGE = 6 +}; + +// Supported output modes of OpenNI image generator +enum +{ + CV_CAP_OPENNI_VGA_30HZ = 0, + CV_CAP_OPENNI_SXGA_15HZ = 1, + CV_CAP_OPENNI_SXGA_30HZ = 2, + CV_CAP_OPENNI_QVGA_30HZ = 3, + CV_CAP_OPENNI_QVGA_60HZ = 4 +}; + +//supported by Android camera output formats +enum +{ + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, //Y + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4 +}; + +// supported Android camera flash modes +enum +{ + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF, + CV_CAP_ANDROID_FLASH_MODE_ON, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE, + CV_CAP_ANDROID_FLASH_MODE_TORCH +}; + +// supported Android camera focus modes +enum +{ + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO, + CV_CAP_ANDROID_FOCUS_MODE_EDOF, + CV_CAP_ANDROID_FOCUS_MODE_FIXED, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY, + CV_CAP_ANDROID_FOCUS_MODE_MACRO +}; + +// supported Android camera white balance modes +enum +{ + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT, + 
CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT +}; + +// supported Android camera antibanding modes +enum +{ + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ, + CV_CAP_ANDROID_ANTIBANDING_AUTO, + CV_CAP_ANDROID_ANTIBANDING_OFF +}; + +/* retrieve or set capture properties */ +CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); +CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); + +// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY +CVAPI(int) cvGetCaptureDomain( CvCapture* capture); + +/* "black box" video file writer structure */ +typedef struct CvVideoWriter CvVideoWriter; + +CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) +{ + return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24); +} + +#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ +#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */ + +/* initialize video file writer */ +CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, + double fps, CvSize frame_size, + int is_color CV_DEFAULT(1)); + +//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename, +// int is_color CV_DEFAULT(1)); + +/* write frame to video file */ +CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); + +/* close video file writer */ +CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvCaptureFromFile cvCreateFileCapture +#define cvCaptureFromCAM 
cvCreateCameraCapture +#define cvCaptureFromAVI cvCaptureFromFile +#define cvCreateAVIWriter cvCreateVideoWriter +#define cvWriteToAVI cvWriteFrame +#define cvAddSearchPath(path) +#define cvvInitSystem cvInitSystem +#define cvvNamedWindow cvNamedWindow +#define cvvShowImage cvShowImage +#define cvvResizeWindow cvResizeWindow +#define cvvDestroyWindow cvDestroyWindow +#define cvvCreateTrackbar cvCreateTrackbar +#define cvvLoadImage(name) cvLoadImage((name),1) +#define cvvSaveImage cvSaveImage +#define cvvAddSearchPath cvAddSearchPath +#define cvvWaitKey(name) cvWaitKey(0) +#define cvvWaitKeyEx(name,delay) cvWaitKey(delay) +#define cvvConvertImage cvConvertImage +#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE +#define set_preprocess_func cvSetPreprocessFuncWin32 +#define set_postprocess_func cvSetPostprocessFuncWin32 + +#if defined WIN32 || defined _WIN32 + +CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback); +CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback); +#define cvSetPreprocessFuncWin32(callback) cvSetPreprocessFuncWin32_((const void*)(callback)) +#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback)) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/OpenCV/Headers/imgproc/imgproc.hpp b/OpenCV/Headers/imgproc/imgproc.hpp new file mode 100644 index 0000000000..caa2d55b5c --- /dev/null +++ b/OpenCV/Headers/imgproc/imgproc.hpp @@ -0,0 +1,1270 @@ +/*! \file imgproc.hpp + \brief The Image Processing + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus + +/*! 
\namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. + In particular, such filters are used for the "horizontal" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. +*/ +class CV_EXPORTS BaseRowFilter +{ +public: + //! the default constructor + BaseRowFilter(); + //! the destructor + virtual ~BaseRowFilter(); + //! the filtering operator. Must be overrided in the derived classes. The horizontal border interpolation is done outside of the class. + virtual void operator()(const uchar* src, uchar* dst, + int width, int cn) = 0; + int ksize, anchor; +}; + + +/*! + The Base Class for Column-wise Filters + + This is the base class for linear or non-linear filters that process columns of 2D arrays. + Such filters are used for the "vertical" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information, + i.e. box filter keeps the sliding sum of elements. 
To reset the state BaseColumnFilter::reset() + must be called (e.g. the method is called by cv::FilterEngine) + */ +class CV_EXPORTS BaseColumnFilter +{ +public: + //! the default constructor + BaseColumnFilter(); + //! the destructor + virtual ~BaseColumnFilter(); + //! the filtering operator. Must be overrided in the derived classes. The vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width) = 0; + //! resets the internal buffers, if any + virtual void reset(); + int ksize, anchor; +}; + +/*! + The Base Class for Non-Separable 2D Filters. + + This is the base class for linear or non-linear 2D filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. 
+ Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). + + \code + void laplace_f(const Mat& src, Mat& dst) + { + CV_Assert( src.type() == CV_32F ); + // make sure the destination array has the proper size and type + dst.create(src.size(), src.type()); + + // get the derivative and smooth kernels for d2I/dx2. + // for d2I/dy2 we could use the same kernels, just swapped + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + + // let's process 10 source rows at once + int DELTA = std::min(10, src.rows); + Ptr Fxx = createSeparableLinearFilter(src.type(), + dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr Fyy = createSeparableLinearFilter(src.type(), + dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = Fxx->start(src), dsty = 0, dy = 0; + Fyy->start(src); + const uchar* sptr = src.data + y*src.step; + + // allocate the buffers for the spatial image derivatives; + // the buffers need to have more than DELTA rows, because at the + // last iteration the output may take max(kd.rows-1,ks.rows-1) + // rows more than the input. 
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() ); + Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() ); + + // inside the loop we always pass DELTA rows to the filter + // (note that the "proceed" method takes care of possibe overflow, since + // it was given the actual image height in the "start" method) + // on output we can get: + // * < DELTA rows (the initial buffer accumulation stage) + // * = DELTA rows (settled state in the middle) + // * > DELTA rows (then the input image is over, but we generate + // "virtual" rows using the border mode and filter them) + // this variable number of output rows is dy. + // dsty is the current output row. + // sptr is the pointer to the first input row in the portion to process + for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy ) + { + Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step ); + dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe); + } + } + } + \endcode +*/ +class CV_EXPORTS FilterEngine +{ +public: + //! the default constructor + FilterEngine(); + //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty. + FilterEngine(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! the destructor + virtual ~FilterEngine(); + //! reinitializes the engine. The previously assigned filters are released. + void init(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! starts filtering of the specified ROI of an image of size wholeSize. 
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + //! starts filtering of the specified ROI of the specified image. + virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), + bool isolated=false, int maxBufRows=-1); + //! processes the next srcCount rows of the image. + virtual int proceed(const uchar* src, int srcStep, int srcCount, + uchar* dst, int dstStep); + //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. + virtual void apply( const Mat& src, Mat& dst, + const Rect& srcRoi=Rect(0,0,-1,-1), + Point dstOfs=Point(0,0), + bool isolated=false); + //! returns true if the filter is separable + bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } + //! returns the number + int remainingInputRows() const; + int remainingOutputRows() const; + + int srcType, dstType, bufType; + Size ksize; + Point anchor; + int maxWidth; + Size wholeSize; + Rect roi; + int dx1, dx2; + int rowBorderType, columnBorderType; + vector borderTab; + int borderElemSize; + vector ringBuf; + vector srcRow; + vector constBorderValue; + vector constBorderRow; + int bufStep, startY, startY0, endY, rowCount, dstY; + vector rows; + + Ptr filter2D; + Ptr rowFilter; + Ptr columnFilter; +}; + +//! type of the kernel +enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, + KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta=0, + int bits=0); + +//! 
returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor=Point(-1,-1), + double delta=0, int bits=0); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point anchor=Point(-1,-1), double delta=0, + int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, + const Scalar& borderValue=Scalar()); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, const Scalar& borderValue=Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); + +//! 
returns the Gabor kernel with the specified parameters +CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, + double gamma, double psi=CV_PI*0.5, int ktype=CV_64F ); + +//! type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, + int columnBorderType=-1, + const Scalar& borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! 
smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigmaX, double sigmaY=0, + int borderType=BORDER_DEFAULT ); +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! 
applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType=BORDER_DEFAULT ); + +// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices +CV_EXPORTS void eigen2x2( const float* a, float* e, int n ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType=BORDER_DEFAULT ); + +//! computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType=BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! 
finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask=noArray(), int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn=0, double stn=0 ); + +//! finds line segments in the black-n-white image using probabalistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength=0, double maxLineGap=0 ); + +//! finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +enum +{ + GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2 +}; + +//! finds arbitrary template in the grayscale image using Generalized Hough Transform +//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. +//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038. +class CV_EXPORTS GeneralizedHough : public Algorithm +{ +public: + static Ptr create(int method); + + virtual ~GeneralizedHough(); + + //! set template to search + void setTemplate(InputArray templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1)); + void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)); + + //! 
find template on image + void detect(InputArray image, OutputArray positions, OutputArray votes = cv::noArray(), int cannyThreshold = 100); + void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = cv::noArray()); + + void release(); + +protected: + virtual void setTemplateImpl(const Mat& edges, const Mat& dx, const Mat& dy, Point templCenter) = 0; + virtual void detectImpl(const Mat& edges, const Mat& dx, const Mat& dy, OutputArray positions, OutputArray votes) = 0; + virtual void releaseImpl() = 0; + +private: + Mat edges_, dx_, dy_; +}; + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! 
resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! 
computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 ); + +CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 );*/ + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound=0, OutputArray flow=noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel=1, + TermCriteria termcrit=TermCriteria( + TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! class of the pixel in GrabCut algorithm +enum +{ + GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground +}; + +//! 
GrabCut algorithm flags +enum +{ + GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +enum +{ + DIST_LABEL_CCOMP = 0, + DIST_LABEL_PIXEL = 1 +}; + +//! builds the discrete Voronoi diagram +CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize, + int labelType=DIST_LABEL_CCOMP ); + +//! computes the distance transform map +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! 
fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB =39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + 
COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + //YUV 4:2:0 formats family + COLOR_YUV2RGB_NV12 = 90, + COLOR_YUV2BGR_NV12 = 91, + COLOR_YUV2RGB_NV21 = 92, + COLOR_YUV2BGR_NV21 = 93, + COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, + COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, + + COLOR_YUV2RGBA_NV12 = 94, + COLOR_YUV2BGRA_NV12 = 95, + COLOR_YUV2RGBA_NV21 = 96, + COLOR_YUV2BGRA_NV21 = 97, + COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, + COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, + + COLOR_YUV2RGB_YV12 = 98, + COLOR_YUV2BGR_YV12 = 99, + COLOR_YUV2RGB_IYUV = 100, + COLOR_YUV2BGR_IYUV = 101, + COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, + COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, + COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, + COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, + + COLOR_YUV2RGBA_YV12 = 102, + COLOR_YUV2BGRA_YV12 = 103, + COLOR_YUV2RGBA_IYUV = 104, + COLOR_YUV2BGRA_IYUV = 105, + COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, + COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, + 
COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, + COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, + + COLOR_YUV2GRAY_420 = 106, + COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, + COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, + COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, + + //YUV 4:2:2 formats family + COLOR_YUV2RGB_UYVY = 107, + COLOR_YUV2BGR_UYVY = 108, + //COLOR_YUV2RGB_VYUY = 109, + //COLOR_YUV2BGR_VYUY = 110, + COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, + COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, + + COLOR_YUV2RGBA_UYVY = 111, + COLOR_YUV2BGRA_UYVY = 112, + //COLOR_YUV2RGBA_VYUY = 113, + //COLOR_YUV2BGRA_VYUY = 114, + COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, + COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, + + COLOR_YUV2RGB_YUY2 = 115, + COLOR_YUV2BGR_YUY2 = 116, + COLOR_YUV2RGB_YVYU = 117, + COLOR_YUV2BGR_YVYU = 118, + COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, + COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, + + COLOR_YUV2RGBA_YUY2 = 119, + COLOR_YUV2BGRA_YUY2 = 120, + COLOR_YUV2RGBA_YVYU = 121, + COLOR_YUV2BGRA_YVYU = 122, + COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, + COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, + + COLOR_YUV2GRAY_UYVY = 123, + COLOR_YUV2GRAY_YUY2 = 124, + //COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, + + // alpha premultiplication + 
COLOR_RGBA2mRGBA = 125, + COLOR_mRGBA2RGBA = 126, + + COLOR_COLORCVT_MAX = 127 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! 
mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE, //!< retrieve all the contours and the whole hierarchy + RETR_FLOODFILL=CV_RETR_FLOODFILL +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! 
computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); +//! computes the contour convexity defects +CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects ); + +//! returns true if the contour is convex. Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! finds intersection of two convex polygons +CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2, + OutputArray _p12, bool handleNested=true ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. 
Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void checkSubdiv() const; + + struct CV_EXPORTS Vertex + { + 
Vertex(); + Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/OpenCV/Headers/imgproc/imgproc_c.h b/OpenCV/Headers/imgproc/imgproc_c.h new file mode 100644 index 0000000000..c7b525c96d --- /dev/null +++ b/OpenCV/Headers/imgproc/imgproc_c.h @@ -0,0 +1,623 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + 
+/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. */ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(x. + After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. 
Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL), + int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP)); + + +/* Applies 
fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) */ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. 
+ Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_responce, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/OpenCV/Headers/imgproc/types_c.h b/OpenCV/Headers/imgproc/types_c.h new file mode 100644 index 0000000000..0e4f0a2445 --- /dev/null +++ b/OpenCV/Headers/imgproc/types_c.h @@ -0,0 +1,626 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb =36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, 
+ CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + //YUV 4:2:0 formats family + CV_YUV2RGB_NV12 = 90, + CV_YUV2BGR_NV12 = 91, + CV_YUV2RGB_NV21 = 92, + CV_YUV2BGR_NV21 = 93, + CV_YUV420sp2RGB = CV_YUV2RGB_NV21, + CV_YUV420sp2BGR = CV_YUV2BGR_NV21, + + CV_YUV2RGBA_NV12 = 94, + CV_YUV2BGRA_NV12 = 95, + CV_YUV2RGBA_NV21 = 96, + CV_YUV2BGRA_NV21 = 97, + CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21, + CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21, + + CV_YUV2RGB_YV12 = 98, + CV_YUV2BGR_YV12 = 99, + CV_YUV2RGB_IYUV = 100, + CV_YUV2BGR_IYUV = 101, + CV_YUV2RGB_I420 = CV_YUV2RGB_IYUV, + CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV, + CV_YUV420p2RGB = CV_YUV2RGB_YV12, + CV_YUV420p2BGR = CV_YUV2BGR_YV12, + + CV_YUV2RGBA_YV12 = 102, + CV_YUV2BGRA_YV12 = 103, + CV_YUV2RGBA_IYUV = 104, + 
CV_YUV2BGRA_IYUV = 105, + CV_YUV2RGBA_I420 = CV_YUV2RGBA_IYUV, + CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV, + CV_YUV420p2RGBA = CV_YUV2RGBA_YV12, + CV_YUV420p2BGRA = CV_YUV2BGRA_YV12, + + CV_YUV2GRAY_420 = 106, + CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420, + CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_YV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_IYUV = CV_YUV2GRAY_420, + CV_YUV2GRAY_I420 = CV_YUV2GRAY_420, + CV_YUV420sp2GRAY = CV_YUV2GRAY_420, + CV_YUV420p2GRAY = CV_YUV2GRAY_420, + + //YUV 4:2:2 formats family + CV_YUV2RGB_UYVY = 107, + CV_YUV2BGR_UYVY = 108, + //CV_YUV2RGB_VYUY = 109, + //CV_YUV2BGR_VYUY = 110, + CV_YUV2RGB_Y422 = CV_YUV2RGB_UYVY, + CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY, + CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY, + CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY, + + CV_YUV2RGBA_UYVY = 111, + CV_YUV2BGRA_UYVY = 112, + //CV_YUV2RGBA_VYUY = 113, + //CV_YUV2BGRA_VYUY = 114, + CV_YUV2RGBA_Y422 = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY, + CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY, + + CV_YUV2RGB_YUY2 = 115, + CV_YUV2BGR_YUY2 = 116, + CV_YUV2RGB_YVYU = 117, + CV_YUV2BGR_YVYU = 118, + CV_YUV2RGB_YUYV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2, + CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2, + + CV_YUV2RGBA_YUY2 = 119, + CV_YUV2BGRA_YUY2 = 120, + CV_YUV2RGBA_YVYU = 121, + CV_YUV2BGRA_YVYU = 122, + CV_YUV2RGBA_YUYV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2, + CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2, + + CV_YUV2GRAY_UYVY = 123, + CV_YUV2GRAY_YUY2 = 124, + //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_Y422 = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_UYNV = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2, + + // alpha premultiplication + CV_RGBA2mRGBA = 125, + CV_mRGBA2RGBA = 126, + + CV_COLORCVT_MAX = 127 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + 
CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3, + CV_RETR_FLOODFILL=4 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. 
+*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} 
+CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3, + CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Content of output label array: connected components or pixels */ +enum +{ + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x| threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? 
threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/OpenCV/Headers/legacy/blobtrack.hpp b/OpenCV/Headers/legacy/blobtrack.hpp new file mode 100644 index 0000000000..496b8be20e --- /dev/null +++ b/OpenCV/Headers/legacy/blobtrack.hpp @@ -0,0 +1,948 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_VIDEOSURVEILLANCE_H__ +#define __OPENCV_VIDEOSURVEILLANCE_H__ + +/* Turn off the functionality until cvaux/src/Makefile.am gets updated: */ +//#if _MSC_VER >= 1200 + +#include "opencv2/core/core_c.h" +#include + +#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__ +#define cv_stricmp stricmp +#define cv_strnicmp strnicmp +#if defined WINCE +#define strdup _strdup +#define stricmp _stricmp +#endif +#elif defined __GNUC__ || defined __sun +#define cv_stricmp strcasecmp +#define cv_strnicmp strncasecmp +#else +#error Do not know how to make case-insensitive string comparison on this platform +#endif + +//struct DefParam; +struct CvDefParam +{ + struct CvDefParam* next; + char* pName; + char* pComment; + double* pDouble; + double Double; + float* pFloat; + float Float; + int* pInt; + int Int; + char** pStr; + char* Str; +}; + +class CV_EXPORTS CvVSModule +{ +private: /* Internal data: */ + CvDefParam* m_pParamList; + char* m_pModuleTypeName; + char* m_pModuleName; + char* m_pNickName; +protected: + int m_Wnd; +public: /* Constructor and destructor: */ + CvVSModule(); + virtual ~CvVSModule(); +private: /* Internal functions: */ + void FreeParam(CvDefParam** pp); + CvDefParam* NewParam(const char* name); + CvDefParam* GetParamPtr(int index); + CvDefParam* GetParamPtr(const char* name); +protected: /* INTERNAL INTERFACE */ + int IsParam(const char* name); + void AddParam(const char* name, double* pAddr); + void AddParam(const char* name, float* pAddr); + void AddParam(const char* name, int* pAddr); + void AddParam(const char* name, const char** pAddr); + void AddParam(const char* name); + void CommentParam(const char* name, const char* pComment); + void SetTypeName(const char* name); + void SetModuleName(const char* name); + void DelParam(const char* name); + +public: /* EXTERNAL INTERFACE */ + const char* GetParamName(int index); + const char* GetParamComment(const char* name); + double GetParam(const char* name); + const 
char* GetParamStr(const char* name); + void SetParam(const char* name, double val); + void SetParamStr(const char* name, const char* str); + void TransferParamsFromChild(CvVSModule* pM, const char* prefix = NULL); + void TransferParamsToChild(CvVSModule* pM, char* prefix = NULL); + virtual void ParamUpdate(); + const char* GetTypeName(); + int IsModuleTypeName(const char* name); + char* GetModuleName(); + int IsModuleName(const char* name); + void SetNickName(const char* pStr); + const char* GetNickName(); + virtual void SaveState(CvFileStorage*); + virtual void LoadState(CvFileStorage*, CvFileNode*); + + virtual void Release() = 0; +};/* CvVMModule */ + +CV_EXPORTS void cvWriteStruct(CvFileStorage* fs, const char* name, void* addr, const char* desc, int num=1); +CV_EXPORTS void cvReadStructByName(CvFileStorage* fs, CvFileNode* node, const char* name, void* addr, const char* desc); + +/* FOREGROUND DETECTOR INTERFACE */ +class CV_EXPORTS CvFGDetector : public CvVSModule +{ +public: + CvFGDetector(); + virtual IplImage* GetMask() = 0; + /* Process current image: */ + virtual void Process(IplImage* pImg) = 0; + /* Release foreground detector: */ + virtual void Release() = 0; +}; + +CV_EXPORTS void cvReleaseFGDetector(CvFGDetector** ppT ); +CV_EXPORTS CvFGDetector* cvCreateFGDetectorBase(int type, void *param); + + +/* BLOB STRUCTURE*/ +struct CvBlob +{ + float x,y; /* blob position */ + float w,h; /* blob sizes */ + int ID; /* blob ID */ +}; + +inline CvBlob cvBlob(float x,float y, float w, float h) +{ + CvBlob B = {x,y,w,h,0}; + return B; +} +#define CV_BLOB_MINW 5 +#define CV_BLOB_MINH 5 +#define CV_BLOB_ID(pB) (((CvBlob*)(pB))->ID) +#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y) +#define CV_BLOB_X(pB) (((CvBlob*)(pB))->x) +#define CV_BLOB_Y(pB) (((CvBlob*)(pB))->y) +#define CV_BLOB_WX(pB) (((CvBlob*)(pB))->w) +#define CV_BLOB_WY(pB) (((CvBlob*)(pB))->h) +#define CV_BLOB_RX(pB) (0.5f*CV_BLOB_WX(pB)) +#define CV_BLOB_RY(pB) 
(0.5f*CV_BLOB_WY(pB)) +#define CV_BLOB_RECT(pB) cvRect(cvRound(((CvBlob*)(pB))->x-CV_BLOB_RX(pB)),cvRound(((CvBlob*)(pB))->y-CV_BLOB_RY(pB)),cvRound(CV_BLOB_WX(pB)),cvRound(CV_BLOB_WY(pB))) +/* END BLOB STRUCTURE*/ + + +/* simple BLOBLIST */ +class CV_EXPORTS CvBlobSeq +{ +public: + CvBlobSeq(int BlobSize = sizeof(CvBlob)) + { + m_pMem = cvCreateMemStorage(); + m_pSeq = cvCreateSeq(0,sizeof(CvSeq),BlobSize,m_pMem); + strcpy(m_pElemFormat,"ffffi"); + } + virtual ~CvBlobSeq() + { + cvReleaseMemStorage(&m_pMem); + }; + virtual CvBlob* GetBlob(int BlobIndex) + { + return (CvBlob*)cvGetSeqElem(m_pSeq,BlobIndex); + }; + virtual CvBlob* GetBlobByID(int BlobID) + { + int i; + for(i=0; itotal; ++i) + if(BlobID == CV_BLOB_ID(GetBlob(i))) + return GetBlob(i); + return NULL; + }; + virtual void DelBlob(int BlobIndex) + { + cvSeqRemove(m_pSeq,BlobIndex); + }; + virtual void DelBlobByID(int BlobID) + { + int i; + for(i=0; itotal; ++i) + { + if(BlobID == CV_BLOB_ID(GetBlob(i))) + { + DelBlob(i); + return; + } + } + }; + virtual void Clear() + { + cvClearSeq(m_pSeq); + }; + virtual void AddBlob(CvBlob* pB) + { + cvSeqPush(m_pSeq,pB); + }; + virtual int GetBlobNum() + { + return m_pSeq->total; + }; + virtual void Write(CvFileStorage* fs, const char* name) + { + const char* attr[] = {"dt",m_pElemFormat,NULL}; + if(fs) + { + cvWrite(fs,name,m_pSeq,cvAttrList(attr,NULL)); + } + } + virtual void Load(CvFileStorage* fs, CvFileNode* node) + { + if(fs==NULL) return; + CvSeq* pSeq = (CvSeq*)cvRead(fs, node); + if(pSeq) + { + int i; + cvClearSeq(m_pSeq); + for(i=0;itotal;++i) + { + void* pB = cvGetSeqElem( pSeq, i ); + cvSeqPush( m_pSeq, pB ); + } + } + } + void AddFormat(const char* str){strcat(m_pElemFormat,str);} +protected: + CvMemStorage* m_pMem; + CvSeq* m_pSeq; + char m_pElemFormat[1024]; +}; +/* simple BLOBLIST */ + + +/* simple TRACKLIST */ +struct CvBlobTrack +{ + int TrackID; + int StartFrame; + CvBlobSeq* pBlobSeq; +}; + +class CV_EXPORTS CvBlobTrackSeq +{ +public: + 
CvBlobTrackSeq(int TrackSize = sizeof(CvBlobTrack)); + virtual ~CvBlobTrackSeq(); + virtual CvBlobTrack* GetBlobTrack(int TrackIndex); + virtual CvBlobTrack* GetBlobTrackByID(int TrackID); + virtual void DelBlobTrack(int TrackIndex); + virtual void DelBlobTrackByID(int TrackID); + virtual void Clear(); + virtual void AddBlobTrack(int TrackID, int StartFrame = 0); + virtual int GetBlobTrackNum(); +protected: + CvMemStorage* m_pMem; + CvSeq* m_pSeq; +}; + +/* simple TRACKLIST */ + + +/* BLOB DETECTOR INTERFACE */ +class CV_EXPORTS CvBlobDetector: public CvVSModule +{ +public: + CvBlobDetector(){SetTypeName("BlobDetector");}; + /* Try to detect new blob entrance based on foreground mask. */ + /* pFGMask - image of foreground mask */ + /* pNewBlob - pointer to CvBlob structure which will be filled if new blob entrance detected */ + /* pOldBlobList - pointer to blob list which already exist on image */ + virtual int DetectNewBlob(IplImage* pImg, IplImage* pImgFG, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList) = 0; + /* release blob detector */ + virtual void Release()=0; +}; + +/* Release any blob detector: */ +CV_EXPORTS void cvReleaseBlobDetector(CvBlobDetector** ppBD); + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorSimple(); +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorCC(); + +struct CV_EXPORTS CvDetectedBlob : public CvBlob +{ + float response; +}; + +CV_INLINE CvDetectedBlob cvDetectedBlob( float x, float y, float w, float h, int ID = 0, float response = 0.0F ) +{ + CvDetectedBlob b; + b.x = x; b.y = y; b.w = w; b.h = h; b.ID = ID; b.response = response; + return b; +} + + +class CV_EXPORTS CvObjectDetector +{ +public: + CvObjectDetector( const char* /*detector_file_name*/ = 0 ); + ~CvObjectDetector(); + + /* + * Release the current detector and load new detector from file + * (if detector_file_name is not 0) + * Return true on success: + */ + bool Load( const char* /*detector_file_name*/ = 0 ); 
+ + /* Return min detector window size: */ + CvSize GetMinWindowSize() const; + + /* Return max border: */ + int GetMaxBorderSize() const; + + /* + * Detect the object on the image and push the detected + * blobs into which must be the sequence of s + */ + void Detect( const CvArr* /*img*/, /* out */ CvBlobSeq* /*detected_blob_seq*/ = 0 ); + +protected: + class CvObjectDetectorImpl* impl; +}; + + +CV_INLINE CvRect cvRectIntersection( const CvRect r1, const CvRect r2 ) +{ + CvRect r = cvRect( MAX(r1.x, r2.x), MAX(r1.y, r2.y), 0, 0 ); + + r.width = MIN(r1.x + r1.width, r2.x + r2.width) - r.x; + r.height = MIN(r1.y + r1.height, r2.y + r2.height) - r.y; + + return r; +} + + +/* + * CvImageDrawer + * + * Draw on an image the specified ROIs from the source image and + * given blobs as ellipses or rectangles: + */ + +struct CvDrawShape +{ + enum {RECT, ELLIPSE} shape; + CvScalar color; +}; + +/*extern const CvDrawShape icv_shape[] = +{ + { CvDrawShape::ELLIPSE, CV_RGB(255,0,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,0,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,0,255) } +};*/ + +class CV_EXPORTS CvImageDrawer +{ +public: + CvImageDrawer() : m_image(0) {} + ~CvImageDrawer() { cvReleaseImage( &m_image ); } + void SetShapes( const CvDrawShape* shapes, int num ); + /* must be the sequence of s */ + IplImage* Draw( const CvArr* src, CvBlobSeq* blob_seq = 0, const CvSeq* roi_seq = 0 ); + IplImage* GetImage() { return m_image; } +protected: + //static const int MAX_SHAPES = sizeof(icv_shape) / sizeof(icv_shape[0]);; + + IplImage* m_image; + CvDrawShape m_shape[16]; +}; + + + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackGen: public CvVSModule +{ +public: + CvBlobTrackGen(){SetTypeName("BlobTrackGen");}; + virtual void SetFileName(char* pFileName) = 0; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void 
Process(IplImage* pImg = NULL, IplImage* pFG = NULL) = 0; + virtual void Release() = 0; +}; + +inline void cvReleaseBlobTrackGen(CvBlobTrackGen** pBTGen) +{ + if(*pBTGen)(*pBTGen)->Release(); + *pBTGen = 0; +} + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGen1(); +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGenYML(); + + + +/* BLOB TRACKER INTERFACE */ +class CV_EXPORTS CvBlobTracker: public CvVSModule +{ +public: + CvBlobTracker(); + + /* Add new blob to track it and assign to this blob personal ID */ + /* pBlob - pointer to structure with blob parameters (ID is ignored)*/ + /* pImg - current image */ + /* pImgFG - current foreground mask */ + /* Return pointer to new added blob: */ + virtual CvBlob* AddBlob(CvBlob* pBlob, IplImage* pImg, IplImage* pImgFG = NULL ) = 0; + + /* Return number of currently tracked blobs: */ + virtual int GetBlobNum() = 0; + + /* Return pointer to specified by index blob: */ + virtual CvBlob* GetBlob(int BlobIndex) = 0; + + /* Delete blob by its index: */ + virtual void DelBlob(int BlobIndex) = 0; + + /* Process current image and track all existed blobs: */ + virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) = 0; + + /* Release blob tracker: */ + virtual void Release() = 0; + + + /* Process one blob (for multi hypothesis tracing): */ + virtual void ProcessBlob(int BlobIndex, CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Get confidence/wieght/probability (0-1) for blob: */ + virtual double GetConfidence(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + virtual double GetConfidenceList(CvBlobSeq* pBlobList, IplImage* pImg, IplImage* pImgFG = NULL); + + virtual void UpdateBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Update all blob models: */ + virtual void Update(IplImage* pImg, IplImage* pImgFG = NULL); + + /* Return pointer to blob 
by its unique ID: */ + virtual int GetBlobIndexByID(int BlobID); + + /* Return pointer to blob by its unique ID: */ + virtual CvBlob* GetBlobByID(int BlobID); + + /* Delete blob by its ID: */ + virtual void DelBlobByID(int BlobID); + + /* Set new parameters for specified (by index) blob: */ + virtual void SetBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/); + + /* Set new parameters for specified (by ID) blob: */ + virtual void SetBlobByID(int BlobID, CvBlob* pBlob); + + /* =============== MULTI HYPOTHESIS INTERFACE ================== */ + + /* Return number of position hyposetis of currently tracked blob: */ + virtual int GetBlobHypNum(int /*BlobIdx*/); + + /* Return pointer to specified blob hypothesis by index blob: */ + virtual CvBlob* GetBlobHyp(int BlobIndex, int /*hypothesis*/); + + /* Set new parameters for specified (by index) blob hyp + * (can be called several times for each hyp ): + */ + virtual void SetBlobHyp(int /*BlobIndex*/, CvBlob* /*pBlob*/); +}; + +CV_EXPORTS void cvReleaseBlobTracker(CvBlobTracker**ppT ); +/* BLOB TRACKER INTERFACE */ + +/*BLOB TRACKER ONE INTERFACE */ +class CV_EXPORTS CvBlobTrackerOne : public CvVSModule +{ +public: + virtual void Init(CvBlob* pBlobInit, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual CvBlob* Process(CvBlob* pBlobPrev, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual void Release() = 0; + + /* Non-required methods: */ + virtual void SkipProcess(CvBlob* /*pBlobPrev*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void Update(CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void SetCollision(int /*CollisionFlag*/){}; /* call in case of blob collision situation*/ + virtual double GetConfidence(CvBlob* /*pBlob*/, IplImage* /*pImg*/, + IplImage* /*pImgFG*/ = NULL, IplImage* /*pImgUnusedReg*/ = NULL) + { + return 1; + }; +}; +inline void cvReleaseBlobTrackerOne(CvBlobTrackerOne **ppT ) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +CV_EXPORTS CvBlobTracker* 
cvCreateBlobTrackerList(CvBlobTrackerOne* (*create)()); +/*BLOB TRACKER ONE INTERFACE */ + +/* Declarations of constructors of implemented modules: */ + +/* Some declarations for specific MeanShift tracker: */ +#define PROFILE_EPANECHNIKOV 0 +#define PROFILE_DOG 1 +struct CvBlobTrackerParamMS +{ + int noOfSigBits; + int appearance_profile; + int meanshift_profile; + float sigma; +}; + +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS2(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1ByList(); + +/* Some declarations for specific Likelihood tracker: */ +struct CvBlobTrackerParamLH +{ + int HistType; /* see Prob.h */ + int ScaleAfter; +}; + +/* Without scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHR(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* With scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHRS(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* Simple blob tracker based on connected component tracking: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCC(); + +/* Connected component tracking and mean-shift particle filter collion-resolver: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCCMSPF(); + +/* Blob tracker that integrates meanshift and connected components: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFG(); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFGS(); + +/* Meanshift without connected-components */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS(); + +/* Particle filtering via Bhattacharya coefficient, which */ +/* is roughly the dot-product of two probability densities. 
*/ +/* See: Real-Time Tracking of Non-Rigid Objects using Mean Shift */ +/* Comanicius, Ramesh, Meer, 2000, 8p */ +/* http://citeseer.ist.psu.edu/321441.html */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSPF(); + +/* =========== tracker integrators trackers =============*/ + +/* Integrator based on Particle Filtering method: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPF(); + +/* Rule based integrator: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIRB(); + +/* Integrator based on data fusion using particle filtering: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPFDF(); + + + + +/* Trajectory postprocessing module: */ +class CV_EXPORTS CvBlobTrackPostProc: public CvVSModule +{ +public: + CvBlobTrackPostProc(){SetTypeName("BlobTrackPostProc");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process() = 0; + virtual int GetBlobNum() = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual void Release() = 0; + + /* Additional functionality: */ + virtual CvBlob* GetBlobByID(int BlobID) + { + int i; + for(i=GetBlobNum();i>0;i--) + { + CvBlob* pB=GetBlob(i-1); + if(pB->ID==BlobID) return pB; + } + return NULL; + }; +}; + +inline void cvReleaseBlobTrackPostProc(CvBlobTrackPostProc** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackPostProcOne: public CvVSModule +{ +public: + CvBlobTrackPostProcOne(){SetTypeName("BlobTrackPostOne");}; + virtual CvBlob* Process(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateBlobTrackPostProcList(CvBlobTrackPostProcOne* (*create)()); + + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcKalman(); +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverRect(); +CV_EXPORTS CvBlobTrackPostProc* 
cvCreateModuleBlobTrackPostProcTimeAverExp(); + + +/* PREDICTORS */ +/* blob PREDICTOR */ +class CvBlobTrackPredictor: public CvVSModule +{ +public: + CvBlobTrackPredictor(){SetTypeName("BlobTrackPredictor");}; + virtual CvBlob* Predict() = 0; + virtual void Update(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; +CV_EXPORTS CvBlobTrackPredictor* cvCreateModuleBlobTrackPredictKalman(); + + + +/* Trajectory analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysis: public CvVSModule +{ +public: + CvBlobTrackAnalysis(){SetTypeName("BlobTrackAnalysis");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual float GetState(int BlobID) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual const char* GetStateDesc(int /*BlobID*/){return NULL;}; + virtual void SetFileName(char* /*DataBaseName*/){}; + virtual void Release() = 0; +}; + + +inline void cvReleaseBlobTrackAnalysis(CvBlobTrackAnalysis** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Feature-vector generation module: */ +class CV_EXPORTS CvBlobTrackFVGen : public CvVSModule +{ +public: + CvBlobTrackFVGen(){SetTypeName("BlobTrackFVGen");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual void Release() = 0; + virtual int GetFVSize() = 0; + virtual int GetFVNum() = 0; + virtual float* GetFV(int index, int* pFVID) = 0; /* Returns pointer to FV, if return 0 then FV not created */ + virtual float* GetFVVar(){return NULL;}; /* Returns pointer to array of variation of values of FV, if returns 0 then FVVar does not exist. 
*/ + virtual float* GetFVMin() = 0; /* Returns pointer to array of minimal values of FV, if returns 0 then FVrange does not exist */ + virtual float* GetFVMax() = 0; /* Returns pointer to array of maximal values of FV, if returns 0 then FVrange does not exist */ +}; + + +/* Trajectory Analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysisOne +{ +public: + virtual ~CvBlobTrackAnalysisOne() {}; + virtual int Process(CvBlob* pBlob, IplImage* pImg, IplImage* pFG) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateBlobTrackAnalysisList(CvBlobTrackAnalysisOne* (*create)()); + +/* Declarations of constructors of implemented modules: */ + +/* Based on histogram analysis of 2D FV (x,y): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistP(); + +/* Based on histogram analysis of 4D FV (x,y,vx,vy): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPV(); + +/* Based on histogram analysis of 5D FV (x,y,vx,vy,state): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPVS(); + +/* Based on histogram analysis of 4D FV (startpos,stoppos): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistSS(); + + + +/* Based on SVM classifier analysis of 2D FV (x,y): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP(); + +/* Based on SVM classifier analysis of 4D FV (x,y,vx,vy): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPV(); + +/* Based on SVM classifier analysis of 5D FV (x,y,vx,vy,state): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS(); + +/* Based on SVM classifier analysis of 4D FV (startpos,stoppos): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS(); + +/* Track analysis based on distance between tracks: */ +CV_EXPORTS 
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisTrackDist(); + +/* Analyzer based on reation Road and height map: */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysis3DRoadMap(); + +/* Analyzer that makes OR decision using set of analyzers: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisIOR(); + +/* Estimator of human height: */ +class CV_EXPORTS CvBlobTrackAnalysisHeight: public CvBlobTrackAnalysis +{ +public: + virtual double GetHeight(CvBlob* pB) = 0; +}; +//CV_EXPORTS CvBlobTrackAnalysisHeight* cvCreateModuleBlobTrackAnalysisHeightScale(); + + + +/* AUTO BLOB TRACKER INTERFACE -- pipeline of 3 modules: */ +class CV_EXPORTS CvBlobTrackerAuto: public CvVSModule +{ +public: + CvBlobTrackerAuto(){SetTypeName("BlobTrackerAuto");}; + virtual void Process(IplImage* pImg, IplImage* pMask = NULL) = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual CvBlob* GetBlobByID(int ID) = 0; + virtual int GetBlobNum() = 0; + virtual IplImage* GetFGMask(){return NULL;}; + virtual float GetState(int BlobID) = 0; + virtual const char* GetStateDesc(int BlobID) = 0; + /* return 0 if trajectory is normal; + * return >0 if trajectory abnormal. */ + virtual void Release() = 0; +}; +inline void cvReleaseBlobTrackerAuto(CvBlobTrackerAuto** ppT) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +/* END AUTO BLOB TRACKER INTERFACE */ + + +/* Constructor functions and data for specific BlobTRackerAuto modules: */ + +/* Parameters of blobtracker auto ver1: */ +struct CvBlobTrackerAutoParam1 +{ + int FGTrainFrames; /* Number of frames needed for FG (foreground) detector to train. */ + + CvFGDetector* pFG; /* FGDetector module. If this field is NULL the Process FG mask is used. */ + + CvBlobDetector* pBD; /* Selected blob detector module. */ + /* If this field is NULL default blobdetector module will be created. */ + + CvBlobTracker* pBT; /* Selected blob tracking module. */ + /* If this field is NULL default blobtracker module will be created. 
*/ + + CvBlobTrackGen* pBTGen; /* Selected blob trajectory generator. */ + /* If this field is NULL no generator is used. */ + + CvBlobTrackPostProc* pBTPP; /* Selected blob trajectory postprocessing module. */ + /* If this field is NULL no postprocessing is done. */ + + int UsePPData; + + CvBlobTrackAnalysis* pBTA; /* Selected blob trajectory analysis module. */ + /* If this field is NULL no track analysis is done. */ +}; + +/* Create blob tracker auto ver1: */ +CV_EXPORTS CvBlobTrackerAuto* cvCreateBlobTrackerAuto1(CvBlobTrackerAutoParam1* param = NULL); + +/* Simple loader for many auto trackers by its type : */ +inline CvBlobTrackerAuto* cvCreateBlobTrackerAuto(int type, void* param) +{ + if(type == 0) return cvCreateBlobTrackerAuto1((CvBlobTrackerAutoParam1*)param); + return 0; +} + + + +struct CvTracksTimePos +{ + int len1,len2; + int beg1,beg2; + int end1,end2; + int comLen; //common length for two tracks + int shift1,shift2; +}; + +/*CV_EXPORTS int cvCompareTracks( CvBlobTrackSeq *groundTruth, + CvBlobTrackSeq *result, + FILE *file);*/ + + +/* Constructor functions: */ + +CV_EXPORTS void cvCreateTracks_One(CvBlobTrackSeq *TS); +CV_EXPORTS void cvCreateTracks_Same(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2); +CV_EXPORTS void cvCreateTracks_AreaErr(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2, int addW, int addH); + + +/* HIST API */ +class CV_EXPORTS CvProb +{ +public: + virtual ~CvProb() {}; + + /* Calculate probability value: */ + virtual double Value(int* /*comp*/, int /*x*/ = 0, int /*y*/ = 0){return -1;}; + + /* Update histograpp Pnew = (1-W)*Pold + W*Padd*/ + /* W weight of new added prob */ + /* comps - matrix of new fetature vectors used to update prob */ + virtual void AddFeature(float W, int* comps, int x =0, int y = 0) = 0; + virtual void Scale(float factor = 0, int x = -1, int y = -1) = 0; + virtual void Release() = 0; +}; +inline void cvReleaseProb(CvProb** ppProb){ppProb[0]->Release();ppProb[0]=NULL;} +/* HIST API */ + +/* Some Prob: */ +CV_EXPORTS 
CvProb* cvCreateProbS(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG2(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbHist(int dim, CvSize size); + +#define CV_BT_HIST_TYPE_S 0 +#define CV_BT_HIST_TYPE_MG 1 +#define CV_BT_HIST_TYPE_MG2 2 +#define CV_BT_HIST_TYPE_H 3 +inline CvProb* cvCreateProb(int type, int dim, CvSize size = cvSize(1,1), void* /*param*/ = NULL) +{ + if(type == CV_BT_HIST_TYPE_S) return cvCreateProbS(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG) return cvCreateProbMG(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG2) return cvCreateProbMG2(dim, size, -1); + if(type == CV_BT_HIST_TYPE_H) return cvCreateProbHist(dim, size); + return NULL; +} + + + +/* Noise type definitions: */ +#define CV_NOISE_NONE 0 +#define CV_NOISE_GAUSSIAN 1 +#define CV_NOISE_UNIFORM 2 +#define CV_NOISE_SPECKLE 3 +#define CV_NOISE_SALT_AND_PEPPER 4 + +/* Add some noise to image: */ +/* pImg - (input) image without noise */ +/* pImg - (output) image with noise */ +/* noise_type - type of added noise */ +/* CV_NOISE_GAUSSIAN - pImg += n , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_UNIFORM - pImg += n , n - is uniform noise with Ampl standart deviation */ +/* CV_NOISE_SPECKLE - pImg += n*pImg , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_SALT_AND_PAPPER - pImg = pImg with blacked and whited pixels, + Ampl is density of brocken pixels (0-there are not broken pixels, 1 - all pixels are broken)*/ +/* Ampl - "amplitude" of noise */ +//CV_EXPORTS void cvAddNoise(IplImage* pImg, int noise_type, double Ampl, CvRNG* rnd_state = NULL); + +/*================== GENERATOR OF TEST VIDEO SEQUENCE ===================== */ +typedef void CvTestSeq; + +/* pConfigfile - Name of file (yml or xml) with description of test sequence */ +/* videos - array of names of test videos described in "pConfigfile" file */ +/* numvideos - size 
of "videos" array */ +CV_EXPORTS CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale = 1, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); +CV_EXPORTS void cvReleaseTestSeq(CvTestSeq** ppTestSeq); + +/* Generate next frame from test video seq and return pointer to it: */ +CV_EXPORTS IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq); + +/* Return pointer to current foreground mask: */ +CV_EXPORTS IplImage* cvTestSeqGetFGMask(CvTestSeq* pTestSeq); + +/* Return pointer to current image: */ +CV_EXPORTS IplImage* cvTestSeqGetImage(CvTestSeq* pTestSeq); + +/* Return frame size of result test video: */ +CV_EXPORTS CvSize cvTestSeqGetImageSize(CvTestSeq* pTestSeq); + +/* Return number of frames result test video: */ +CV_EXPORTS int cvTestSeqFrameNum(CvTestSeq* pTestSeq); + +/* Return number of existing objects. + * This is general number of any objects. + * For example number of trajectories may be equal or less than returned value: + */ +CV_EXPORTS int cvTestSeqGetObjectNum(CvTestSeq* pTestSeq); + +/* Return 0 if there is not position for current defined on current frame */ +/* Return 1 if there is object position and pPos was filled */ +CV_EXPORTS int cvTestSeqGetObjectPos(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pPos); +CV_EXPORTS int cvTestSeqGetObjectSize(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pSize); + +/* Add noise to final image: */ +CV_EXPORTS void cvTestSeqAddNoise(CvTestSeq* pTestSeq, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); + +/* Add Intensity variation: */ +CV_EXPORTS void cvTestSeqAddIntensityVariation(CvTestSeq* pTestSeq, float DI_per_frame, float MinI, float MaxI); +CV_EXPORTS void cvTestSeqSetFrame(CvTestSeq* pTestSeq, int n); + +#endif + +/* End of file. 
*/ diff --git a/OpenCV/Headers/legacy/compat.hpp b/OpenCV/Headers/legacy/compat.hpp new file mode 100644 index 0000000000..5b5495edcb --- /dev/null +++ b/OpenCV/Headers/legacy/compat.hpp @@ -0,0 +1,740 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + A few macros and definitions for backward compatibility + with the previous versions of OpenCV. They are obsolete and + are likely to be removed in future. To check whether your code + uses any of these, define CV_NO_BACKWARD_COMPATIBILITY before + including cv.h. +*/ + +#ifndef __OPENCV_COMPAT_HPP__ +#define __OPENCV_COMPAT_HPP__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef int CvMatType; +typedef int CvDisMaskType; +typedef CvMat CvMatArray; + +typedef int CvThreshType; +typedef int CvAdaptiveThreshMethod; +typedef int CvCompareMethod; +typedef int CvFontFace; +typedef int CvPolyApproxMethod; +typedef int CvContoursMatchMethod; +typedef int CvContourTreesMatchMethod; +typedef int CvCoeffType; +typedef int CvRodriguesType; +typedef int CvElementShape; +typedef int CvMorphOp; +typedef int CvTemplMatchMethod; + +typedef CvPoint2D64f CvPoint2D64d; +typedef CvPoint3D64f CvPoint3D64d; + +enum +{ + CV_MAT32F = CV_32FC1, + CV_MAT3x1_32F = CV_32FC1, + CV_MAT4x1_32F = CV_32FC1, + CV_MAT3x3_32F = CV_32FC1, + CV_MAT4x4_32F = CV_32FC1, + + CV_MAT64D = CV_64FC1, + CV_MAT3x1_64D = CV_64FC1, + CV_MAT4x1_64D = CV_64FC1, + CV_MAT3x3_64D = CV_64FC1, + CV_MAT4x4_64D = CV_64FC1 +}; + +enum +{ + IPL_GAUSSIAN_5x5 = 7 +}; + +typedef CvBox2D CvBox2D32f; + +/* allocation/deallocation macros */ +#define cvCreateImageData cvCreateData 
+#define cvReleaseImageData cvReleaseData +#define cvSetImageData cvSetData +#define cvGetImageRawData cvGetRawData + +#define cvmAlloc cvCreateData +#define cvmFree cvReleaseData +#define cvmAllocArray cvCreateData +#define cvmFreeArray cvReleaseData + +#define cvIntegralImage cvIntegral +#define cvMatchContours cvMatchShapes + +CV_EXPORTS CvMat cvMatArray( int rows, int cols, int type, + int count, void* data CV_DEFAULT(0)); + +#define cvUpdateMHIByTime cvUpdateMotionHistory + +#define cvAccMask cvAcc +#define cvSquareAccMask cvSquareAcc +#define cvMultiplyAccMask cvMultiplyAcc +#define cvRunningAvgMask(imgY, imgU, mask, alpha) cvRunningAvg(imgY, imgU, alpha, mask) + +#define cvSetHistThresh cvSetHistBinRanges +#define cvCalcHistMask(img, mask, hist, doNotClear) cvCalcHist(img, hist, doNotClear, mask) + +CV_EXPORTS double cvMean( const CvArr* image, const CvArr* mask CV_DEFAULT(0)); +CV_EXPORTS double cvSumPixels( const CvArr* image ); +CV_EXPORTS void cvMean_StdDev( const CvArr* image, double* mean, double* sdv, + const CvArr* mask CV_DEFAULT(0)); + +CV_EXPORTS void cvmPerspectiveProject( const CvMat* mat, const CvArr* src, CvArr* dst ); +CV_EXPORTS void cvFillImage( CvArr* mat, double color ); + +#define cvCvtPixToPlane cvSplit +#define cvCvtPlaneToPix cvMerge + +typedef struct CvRandState +{ + CvRNG state; /* RNG state (the current seed and carry)*/ + int disttype; /* distribution type */ + CvScalar param[2]; /* parameters of RNG */ +} CvRandState; + +/* Changes RNG range while preserving RNG state */ +CV_EXPORTS void cvRandSetRange( CvRandState* state, double param1, + double param2, int index CV_DEFAULT(-1)); + +CV_EXPORTS void cvRandInit( CvRandState* state, double param1, + double param2, int seed, + int disttype CV_DEFAULT(CV_RAND_UNI)); + +/* Fills array with random numbers */ +CV_EXPORTS void cvRand( CvRandState* state, CvArr* arr ); + +#define cvRandNext( _state ) cvRandInt( &(_state)->state ) + +CV_EXPORTS void cvbRand( CvRandState* state, float* dst, 
int len ); + +CV_EXPORTS void cvbCartToPolar( const float* y, const float* x, + float* magnitude, float* angle, int len ); +CV_EXPORTS void cvbFastArctan( const float* y, const float* x, float* angle, int len ); +CV_EXPORTS void cvbSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbInvSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbReciprocal( const float* x, float* y, int len ); +CV_EXPORTS void cvbFastExp( const float* x, double* y, int len ); +CV_EXPORTS void cvbFastLog( const double* x, float* y, int len ); + +CV_EXPORTS CvRect cvContourBoundingRect( void* point_set, int update CV_DEFAULT(0)); + +CV_EXPORTS double cvPseudoInverse( const CvArr* src, CvArr* dst ); +#define cvPseudoInv cvPseudoInverse + +#define cvContourMoments( contour, moments ) cvMoments( contour, moments, 0 ) + +#define cvGetPtrAt cvPtr2D +#define cvGetAt cvGet2D +#define cvSetAt(arr,val,y,x) cvSet2D((arr),(y),(x),(val)) + +#define cvMeanMask cvMean +#define cvMean_StdDevMask(img,mask,mean,sdv) cvMean_StdDev(img,mean,sdv,mask) + +#define cvNormMask(imgA,imgB,mask,normType) cvNorm(imgA,imgB,normType,mask) + +#define cvMinMaxLocMask(img, mask, min_val, max_val, min_loc, max_loc) \ + cvMinMaxLoc(img, min_val, max_val, min_loc, max_loc, mask) + +#define cvRemoveMemoryManager cvSetMemoryManager + +#define cvmSetZero( mat ) cvSetZero( mat ) +#define cvmSetIdentity( mat ) cvSetIdentity( mat ) +#define cvmAdd( src1, src2, dst ) cvAdd( src1, src2, dst, 0 ) +#define cvmSub( src1, src2, dst ) cvSub( src1, src2, dst, 0 ) +#define cvmCopy( src, dst ) cvCopy( src, dst, 0 ) +#define cvmMul( src1, src2, dst ) cvMatMulAdd( src1, src2, 0, dst ) +#define cvmTranspose( src, dst ) cvT( src, dst ) +#define cvmInvert( src, dst ) cvInv( src, dst ) +#define cvmMahalanobis(vec1, vec2, mat) cvMahalanobis( vec1, vec2, mat ) +#define cvmDotProduct( vec1, vec2 ) cvDotProduct( vec1, vec2 ) +#define cvmCrossProduct(vec1, vec2,dst) cvCrossProduct( vec1, vec2, dst ) +#define cvmTrace( mat ) 
(cvTrace( mat )).val[0] +#define cvmMulTransposed( src, dst, order ) cvMulTransposed( src, dst, order ) +#define cvmEigenVV( mat, evec, eval, eps) cvEigenVV( mat, evec, eval, eps ) +#define cvmDet( mat ) cvDet( mat ) +#define cvmScale( src, dst, scale ) cvScale( src, dst, scale ) + +#define cvCopyImage( src, dst ) cvCopy( src, dst, 0 ) +#define cvReleaseMatHeader cvReleaseMat + +/* Calculates exact convex hull of 2d point set */ +CV_EXPORTS void cvConvexHull( CvPoint* points, int num_points, + CvRect* bound_rect, + int orientation, int* hull, int* hullsize ); + + +CV_EXPORTS void cvMinAreaRect( CvPoint* points, int n, + int left, int bottom, + int right, int top, + CvPoint2D32f* anchor, + CvPoint2D32f* vect1, + CvPoint2D32f* vect2 ); + +typedef int CvDisType; +typedef int CvChainApproxMethod; +typedef int CvContourRetrievalMode; + +CV_EXPORTS void cvFitLine3D( CvPoint3D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +/* Fits a line into set of 2d points in a robust way (M-estimator technique) */ +CV_EXPORTS void cvFitLine2D( CvPoint2D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +CV_EXPORTS void cvFitEllipse( const CvPoint2D32f* points, int count, CvBox2D* box ); + +/* Projects 2d points to one of standard coordinate planes + (i.e. removes one of coordinates) */ +CV_EXPORTS void cvProject3D( CvPoint3D32f* points3D, int count, + CvPoint2D32f* points2D, + int xIndx CV_DEFAULT(0), + int yIndx CV_DEFAULT(1)); + +/* Retrieves value of the particular bin + of x-dimensional (x=1,2,3,...) 
histogram */ +#define cvQueryHistValue_1D( hist, idx0 ) \ + ((float)cvGetReal1D( (hist)->bins, (idx0))) +#define cvQueryHistValue_2D( hist, idx0, idx1 ) \ + ((float)cvGetReal2D( (hist)->bins, (idx0), (idx1))) +#define cvQueryHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float)cvGetReal3D( (hist)->bins, (idx0), (idx1), (idx2))) +#define cvQueryHistValue_nD( hist, idx ) \ + ((float)cvGetRealND( (hist)->bins, (idx))) + +/* Returns pointer to the particular bin of x-dimesional histogram. + For sparse histogram the bin is created if it didn't exist before */ +#define cvGetHistValue_1D( hist, idx0 ) \ + ((float*)cvPtr1D( (hist)->bins, (idx0), 0)) +#define cvGetHistValue_2D( hist, idx0, idx1 ) \ + ((float*)cvPtr2D( (hist)->bins, (idx0), (idx1), 0)) +#define cvGetHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float*)cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0)) +#define cvGetHistValue_nD( hist, idx ) \ + ((float*)cvPtrND( (hist)->bins, (idx), 0)) + + +#define CV_IS_SET_ELEM_EXISTS CV_IS_SET_ELEM + + +CV_EXPORTS int cvHoughLines( CvArr* image, double rho, + double theta, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS int cvHoughLinesP( CvArr* image, double rho, + double theta, int threshold, + int lineLength, int lineGap, + int* lines, int linesNumber ); + + +CV_EXPORTS int cvHoughLinesSDiv( CvArr* image, double rho, int srn, + double theta, int stn, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS float cvCalcEMD( const float* signature1, int size1, + const float* signature2, int size2, + int dims, int dist_type CV_DEFAULT(CV_DIST_L2), + CvDistanceFunction dist_func CV_DEFAULT(0), + float* lower_bound CV_DEFAULT(0), + void* user_param CV_DEFAULT(0)); + +CV_EXPORTS void cvKMeans( int num_clusters, float** samples, + int num_samples, int vec_size, + CvTermCriteria termcrit, int* cluster_idx ); + +CV_EXPORTS void cvStartScanGraph( CvGraph* graph, CvGraphScanner* scanner, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask 
CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +CV_EXPORTS void cvEndScanGraph( CvGraphScanner* scanner ); + + +/* old drawing functions */ +CV_EXPORTS void cvLineAA( CvArr* img, CvPoint pt1, CvPoint pt2, + double color, int scale CV_DEFAULT(0)); + +CV_EXPORTS void cvCircleAA( CvArr* img, CvPoint center, int radius, + double color, int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvEllipseAA( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, + double end_angle, double color, + int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvPolyLineAA( CvArr* img, CvPoint** pts, int* npts, int contours, + int is_closed, double color, int scale CV_DEFAULT(0) ); + +/****************************************************************************************\ +* Pixel Access Macros * +\****************************************************************************************/ + +typedef struct _CvPixelPosition8u +{ + uchar* currline; /* pointer to the start of the current pixel line */ + uchar* topline; /* pointer to the start of the top pixel line */ + uchar* bottomline; /* pointer to the start of the first line */ + /* which is below the image */ + int x; /* current x coordinate ( in pixels ) */ + int width; /* width of the image ( in pixels ) */ + int height; /* height of the image ( in pixels ) */ + int step; /* distance between lines ( in elements of single */ + /* plane ) */ + int step_arr[3]; /* array: ( 0, -step, step ). 
It is used for */ + /* vertical moving */ +} CvPixelPosition8u; + +/* this structure differs from the above only in data type */ +typedef struct _CvPixelPosition8s +{ + schar* currline; + schar* topline; + schar* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition8s; + +/* this structure differs from the CvPixelPosition8u only in data type */ +typedef struct _CvPixelPosition32f +{ + float* currline; + float* topline; + float* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition32f; + + +/* Initialize one of the CvPixelPosition structures. */ +/* pos - initialized structure */ +/* origin - pointer to the left-top corner of the ROI */ +/* step - width of the whole image in bytes */ +/* roi - width & height of the ROI */ +/* x, y - initial position */ +#define CV_INIT_PIXEL_POS(pos, origin, _step, roi, _x, _y, orientation) \ + ( \ + (pos).step = (_step)/sizeof((pos).currline[0]) * (orientation ? -1 : 1), \ + (pos).width = (roi).width, \ + (pos).height = (roi).height, \ + (pos).bottomline = (origin) + (pos).step*(pos).height, \ + (pos).topline = (origin) - (pos).step, \ + (pos).step_arr[0] = 0, \ + (pos).step_arr[1] = -(pos).step, \ + (pos).step_arr[2] = (pos).step, \ + (pos).x = (_x), \ + (pos).currline = (origin) + (pos).step*(_y) ) + + +/* Move to specified point ( absolute shift ) */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_MOVE_TO( pos, _x, _y, cs ) \ +((pos).currline = (_y) >= 0 && (_y) < (pos).height ? (pos).topline + ((_y)+1)*(pos).step : 0, \ + (pos).x = (_x) >= 0 && (_x) < (pos).width ? 
(_x) : 0, (pos).currline + (_x) * (cs) ) + +/* Get current coordinates */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_GET_CURRENT( pos, cs ) ((pos).currline + (pos).x * (cs)) + +/* Move by one pixel relatively to current position */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT( pos, cs ) \ + ( --(pos).x >= 0 ? (pos).currline + (pos).x*(cs) : 0 ) + +/* right */ +#define CV_MOVE_RIGHT( pos, cs ) \ + ( ++(pos).x < (pos).width ? (pos).currline + (pos).x*(cs) : 0 ) + +/* up */ +#define CV_MOVE_UP( pos, cs ) \ + (((pos).currline -= (pos).step) != (pos).topline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* down */ +#define CV_MOVE_DOWN( pos, cs ) \ + (((pos).currline += (pos).step) != (pos).bottomline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* left up */ +#define CV_MOVE_LU( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* right up */ +#define CV_MOVE_RU( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* left down */ +#define CV_MOVE_LD( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_DOWN(pos, cs)) + +/* right down */ +#define CV_MOVE_RD( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_DOWN(pos, cs)) + + + +/* Move by one pixel relatively to current position with wrapping when the position */ +/* achieves image boundary */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT_WRAP( pos, cs ) \ + ((pos).currline + ( --(pos).x >= 0 ? (pos).x : ((pos).x = (pos).width-1))*(cs)) + +/* right */ +#define CV_MOVE_RIGHT_WRAP( pos, cs ) \ + ((pos).currline + ( ++(pos).x < (pos).width ? (pos).x : ((pos).x = 0))*(cs) ) + +/* up */ +#define CV_MOVE_UP_WRAP( pos, cs ) \ + ((((pos).currline -= (pos).step) != (pos).topline ? 
\ + (pos).currline : ((pos).currline = (pos).bottomline - (pos).step)) + (pos).x*(cs) ) + +/* down */ +#define CV_MOVE_DOWN_WRAP( pos, cs ) \ + ((((pos).currline += (pos).step) != (pos).bottomline ? \ + (pos).currline : ((pos).currline = (pos).topline + (pos).step)) + (pos).x*(cs) ) + +/* left up */ +#define CV_MOVE_LU_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* right up */ +#define CV_MOVE_RU_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* left down */ +#define CV_MOVE_LD_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) +/* right down */ +#define CV_MOVE_RD_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) + +/* Numeric constants which used for moving in arbitrary direction */ +enum +{ + CV_SHIFT_NONE = 2, + CV_SHIFT_LEFT = 1, + CV_SHIFT_RIGHT = 3, + CV_SHIFT_UP = 6, + CV_SHIFT_DOWN = 10, + CV_SHIFT_LU = 5, + CV_SHIFT_RU = 7, + CV_SHIFT_LD = 9, + CV_SHIFT_RD = 11 +}; + +/* Move by one pixel in specified direction */ +/* pos - position structure */ +/* shift - direction ( it's value must be one of the CV_SHIFT_Ö constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], (pos).x += ((shift)&3)-2, \ + ((pos).currline != (pos).topline && (pos).currline != (pos).bottomline && \ + (pos).x >= 0 && (pos).x < (pos).width) ? (pos).currline + (pos).x*(cs) : 0 ) + +/* Move by one pixel in specified direction with wrapping when the */ +/* position achieves image boundary */ +/* pos - position structure */ +/* shift - direction ( it's value must be one of the CV_SHIFT_Ö constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM_WRAP( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], \ + (pos).currline = ((pos).currline == (pos).topline ? \ + (pos).bottomline - (pos).step : \ + (pos).currline == (pos).bottomline ? 
\ + (pos).topline + (pos).step : (pos).currline), \ + \ + (pos).x += ((shift)&3)-2, \ + (pos).x = ((pos).x < 0 ? (pos).width-1 : (pos).x >= (pos).width ? 0 : (pos).x), \ + \ + (pos).currline + (pos).x*(cs) ) + + +typedef float* CvVect32f; +typedef float* CvMatr32f; +typedef double* CvVect64d; +typedef double* CvMatr64d; + +CV_EXPORTS void cvUnDistortOnce( const CvArr* src, CvArr* dst, + const float* intrinsic_matrix, + const float* distortion_coeffs, + int interpolate ); + +/* the two functions below have quite hackerish implementations, use with care + (or, which is better, switch to cvUndistortInitMap and cvRemap instead */ +CV_EXPORTS void cvUnDistortInit( const CvArr* src, + CvArr* undistortion_map, + const float* A, const float* k, + int interpolate ); + +CV_EXPORTS void cvUnDistort( const CvArr* src, CvArr* dst, + const CvArr* undistortion_map, + int interpolate ); + +/* Find fundamental matrix */ +CV_EXPORTS void cvFindFundamentalMatrix( int* points1, int* points2, + int numpoints, int method, float* matrix ); + + +CV_EXPORTS int cvFindChessBoardCornerGuesses( const void* arr, void* thresharr, + CvMemStorage* storage, + CvSize pattern_size, CvPoint2D32f * corners, + int *corner_count ); + +/* Calibrates camera using multiple views of calibration pattern */ +CV_EXPORTS void cvCalibrateCamera( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D32f* _image_points, CvPoint3D32f* _object_points, + float* _distortion_coeffs, float* _camera_matrix, float* _translation_vectors, + float* _rotation_matrices, int flags ); + + +CV_EXPORTS void cvCalibrateCamera_64d( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D64f* _image_points, CvPoint3D64f* _object_points, + double* _distortion_coeffs, double* _camera_matrix, double* _translation_vectors, + double* _rotation_matrices, int flags ); + + +/* Find 3d position of object given intrinsic camera parameters, + 3d model of the object and projection of the object into view plane */ 
+CV_EXPORTS void cvFindExtrinsicCameraParams( int point_count, + CvSize image_size, CvPoint2D32f* _image_points, + CvPoint3D32f* _object_points, float* focal_length, + CvPoint2D32f principal_point, float* _distortion_coeffs, + float* _rotation_vector, float* _translation_vector ); + +/* Variant of the previous function that takes double-precision parameters */ +CV_EXPORTS void cvFindExtrinsicCameraParams_64d( int point_count, + CvSize image_size, CvPoint2D64f* _image_points, + CvPoint3D64f* _object_points, double* focal_length, + CvPoint2D64f principal_point, double* _distortion_coeffs, + double* _rotation_vector, double* _translation_vector ); + +/* Rodrigues transform */ +enum +{ + CV_RODRIGUES_M2V = 0, + CV_RODRIGUES_V2M = 1 +}; + +/* Converts rotation_matrix matrix to rotation_matrix vector or vice versa */ +CV_EXPORTS void cvRodrigues( CvMat* rotation_matrix, CvMat* rotation_vector, + CvMat* jacobian, int conv_type ); + +/* Does reprojection of 3d object points to the view plane */ +CV_EXPORTS void cvProjectPoints( int point_count, CvPoint3D64f* _object_points, + double* _rotation_vector, double* _translation_vector, + double* focal_length, CvPoint2D64f principal_point, + double* _distortion, CvPoint2D64f* _image_points, + double* _deriv_points_rotation_matrix, + double* _deriv_points_translation_vect, + double* _deriv_points_focal, + double* _deriv_points_principal_point, + double* _deriv_points_distortion_coeffs ); + + +/* Simpler version of the previous function */ +CV_EXPORTS void cvProjectPointsSimple( int point_count, CvPoint3D64f* _object_points, + double* _rotation_matrix, double* _translation_vector, + double* _camera_matrix, double* _distortion, CvPoint2D64f* _image_points ); + + +#define cvMake2DPoints cvConvertPointsHomogeneous +#define cvMake3DPoints cvConvertPointsHomogeneous + +#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform + +#define cvConvertPointsHomogenious cvConvertPointsHomogeneous + + +//////////////////////////////////// 
feature extractors: obsolete API ////////////////////////////////// + +typedef struct CvSURFPoint +{ + CvPoint2D32f pt; + + int laplacian; + int size; + float dir; + float hessian; + +} CvSURFPoint; + +CV_INLINE CvSURFPoint cvSURFPoint( CvPoint2D32f pt, int laplacian, + int size, float dir CV_DEFAULT(0), + float hessian CV_DEFAULT(0)) +{ + CvSURFPoint kp; + + kp.pt = pt; + kp.laplacian = laplacian; + kp.size = size; + kp.dir = dir; + kp.hessian = hessian; + + return kp; +} + +typedef struct CvSURFParams +{ + int extended; + int upright; + double hessianThreshold; + + int nOctaves; + int nOctaveLayers; + +} CvSURFParams; + +CVAPI(CvSURFParams) cvSURFParams( double hessianThreshold, int extended CV_DEFAULT(0) ); + +// If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed +// at the locations provided in keypoints (a CvSeq of CvSURFPoint). +CVAPI(void) cvExtractSURF( const CvArr* img, const CvArr* mask, + CvSeq** keypoints, CvSeq** descriptors, + CvMemStorage* storage, CvSURFParams params, + int useProvidedKeyPts CV_DEFAULT(0) ); + +/*! + Maximal Stable Regions Parameters + */ +typedef struct CvMSERParams +{ + //! delta, in the code, it compares (size_{i}-size_{i-delta})/size_{i-delta} + int delta; + //! prune the area which bigger than maxArea + int maxArea; + //! prune the area which smaller than minArea + int minArea; + //! prune the area have simliar size to its children + float maxVariation; + //! trace back to cut off mser with diversity < min_diversity + float minDiversity; + + /////// the next few params for MSER of color image + + //! for color image, the evolution steps + int maxEvolution; + //! the area threshold to cause re-initialize + double areaThreshold; + //! ignore too small margin + double minMargin; + //! 
the aperture size for edge blur + int edgeBlurSize; +} CvMSERParams; + +CVAPI(CvMSERParams) cvMSERParams( int delta CV_DEFAULT(5), int min_area CV_DEFAULT(60), + int max_area CV_DEFAULT(14400), float max_variation CV_DEFAULT(.25f), + float min_diversity CV_DEFAULT(.2f), int max_evolution CV_DEFAULT(200), + double area_threshold CV_DEFAULT(1.01), + double min_margin CV_DEFAULT(.003), + int edge_blur_size CV_DEFAULT(5) ); + +// Extracts the contours of Maximally Stable Extremal Regions +CVAPI(void) cvExtractMSER( CvArr* _img, CvArr* _mask, CvSeq** contours, CvMemStorage* storage, CvMSERParams params ); + + +typedef struct CvStarKeypoint +{ + CvPoint pt; + int size; + float response; +} CvStarKeypoint; + +CV_INLINE CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response) +{ + CvStarKeypoint kpt; + kpt.pt = pt; + kpt.size = size; + kpt.response = response; + return kpt; +} + +typedef struct CvStarDetectorParams +{ + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +} CvStarDetectorParams; + +CV_INLINE CvStarDetectorParams cvStarDetectorParams( + int maxSize CV_DEFAULT(45), + int responseThreshold CV_DEFAULT(30), + int lineThresholdProjected CV_DEFAULT(10), + int lineThresholdBinarized CV_DEFAULT(8), + int suppressNonmaxSize CV_DEFAULT(5)) +{ + CvStarDetectorParams params; + params.maxSize = maxSize; + params.responseThreshold = responseThreshold; + params.lineThresholdProjected = lineThresholdProjected; + params.lineThresholdBinarized = lineThresholdBinarized; + params.suppressNonmaxSize = suppressNonmaxSize; + + return params; +} + +CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage, + CvStarDetectorParams params CV_DEFAULT(cvStarDetectorParams())); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/OpenCV/Headers/legacy/legacy.hpp b/OpenCV/Headers/legacy/legacy.hpp new file mode 100644 index 0000000000..96da25c9e9 --- /dev/null +++ 
b/OpenCV/Headers/legacy/legacy.hpp @@ -0,0 +1,3436 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_LEGACY_HPP__ +#define __OPENCV_LEGACY_HPP__ + +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr, + double canny_threshold, + double ffill_threshold, + CvMemStorage* storage ); + +/****************************************************************************************\ +* Eigen objects * +\****************************************************************************************/ + +typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data); +typedef union +{ + CvCallback callback; + void* data; +} +CvInput; + +#define CV_EIGOBJ_NO_CALLBACK 0 +#define CV_EIGOBJ_INPUT_CALLBACK 1 +#define CV_EIGOBJ_OUTPUT_CALLBACK 2 +#define CV_EIGOBJ_BOTH_CALLBACK 3 + +/* Calculates covariation matrix of a set of arrays */ +CVAPI(void) cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags, + int ioBufSize, uchar* buffer, void* userData, + IplImage* avg, float* covarMatrix ); + +/* Calculates eigen values and vectors of covariation matrix of a set of + arrays */ +CVAPI(void) cvCalcEigenObjects( int nObjects, void* input, void* output, + int ioFlags, int ioBufSize, void* userData, + CvTermCriteria* calcLimit, IplImage* avg, + float* eigVals ); + 
+/* Calculates dot product (obj - avg) * eigObj (i.e. projects image to eigen vector) */ +CVAPI(double) cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg ); + +/* Projects image to eigen space (finds all decomposion coefficients */ +CVAPI(void) cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput, + int ioFlags, void* userData, IplImage* avg, + float* coeffs ); + +/* Projects original objects used to calculate eigen space basis to that space */ +CVAPI(void) cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags, + void* userData, float* coeffs, IplImage* avg, + IplImage* proj ); + +/****************************************************************************************\ +* 1D/2D HMM * +\****************************************************************************************/ + +typedef struct CvImgObsInfo +{ + int obs_x; + int obs_y; + int obs_size; + float* obs;//consequtive observations + + int* state;/* arr of pairs superstate/state to which observation belong */ + int* mix; /* number of mixture to which observation belong */ + +} CvImgObsInfo;/*struct for 1 image*/ + +typedef CvImgObsInfo Cv1DObsInfo; + +typedef struct CvEHMMState +{ + int num_mix; /*number of mixtures in this state*/ + float* mu; /*mean vectors corresponding to each mixture*/ + float* inv_var; /* square root of inversed variances corresp. to each mixture*/ + float* log_var_val; /* sum of 0.5 (LN2PI + ln(variance[i]) ) for i=1,n */ + float* weight; /*array of mixture weights. Summ of all weights in state is 1. */ + +} CvEHMMState; + +typedef struct CvEHMM +{ + int level; /* 0 - lowest(i.e its states are real states), ..... */ + int num_states; /* number of HMM states */ + float* transP;/*transition probab. 
matrices for states */ + float** obsProb; /* if level == 0 - array of brob matrices corresponding to hmm + if level == 1 - martix of matrices */ + union + { + CvEHMMState* state; /* if level == 0 points to real states array, + if not - points to embedded hmms */ + struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */ + } u; + +} CvEHMM; + +/*CVAPI(int) icvCreate1DHMM( CvEHMM** this_hmm, + int state_number, int* num_mix, int obs_size ); + +CVAPI(int) icvRelease1DHMM( CvEHMM** phmm ); + +CVAPI(int) icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array, + int num_seq, + CvEHMM* hmm ); + +CVAPI(float) icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm); + +CVAPI(int) icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/ + +/*********************************** Embedded HMMs *************************************/ + +/* Creates 2D HMM */ +CVAPI(CvEHMM*) cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize ); + +/* Releases HMM */ +CVAPI(void) cvRelease2DHMM( CvEHMM** hmm ); + +#define CV_COUNT_OBS(roi, win, delta, numObs ) \ +{ \ + (numObs)->width =((roi)->width -(win)->width +(delta)->width)/(delta)->width; \ + (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\ +} + +/* Creates storage for observation vectors */ +CVAPI(CvImgObsInfo*) cvCreateObsInfo( CvSize numObs, int obsSize ); + +/* Releases storage for observation vectors */ +CVAPI(void) cvReleaseObsInfo( CvImgObsInfo** obs_info ); + + +/* The function takes an image on input and and returns the sequnce of observations + to be used with an embedded HMM; Each observation is top-left block of DCT + 
coefficient matrix */ +CVAPI(void) cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize, + CvSize obsSize, CvSize delta ); + + +/* Uniformly segments all observation vectors extracted from image */ +CVAPI(void) cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm ); + +/* Does mixture segmentation of the states of embedded HMM */ +CVAPI(void) cvInitMixSegm( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function calculates means, variances, weights of every Gaussian mixture + of every low-level state of embedded HMM */ +CVAPI(void) cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes transition probability matrices of embedded HMM + given observations segmentation */ +CVAPI(void) cvEstimateTransProb( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes probabilities of appearing observations at any state + (i.e. computes P(obs|state) for every pair(obs,state)) */ +CVAPI(void) cvEstimateObsProb( CvImgObsInfo* obs_info, + CvEHMM* hmm ); + +/* Runs Viterbi algorithm for embedded HMM */ +CVAPI(float) cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm ); + + +/* Function clusters observation vectors from several images + given observations segmentation. + Euclidean distance used for clustering vectors. 
+ Centers of clusters are given means of every mixture */ +CVAPI(void) cvMixSegmL2( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/****************************************************************************************\ +* A few functions from old stereo gesture recognition demosions * +\****************************************************************************************/ + +/* Creates hand mask image given several points on the hand */ +CVAPI(void) cvCreateHandMask( CvSeq* hand_points, + IplImage *img_mask, CvRect *roi); + +/* Finds hand region in range image data */ +CVAPI(void) cvFindHandRegion (CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int flag, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Finds hand region in range image data (advanced version) */ +CVAPI(void) cvFindHandRegionA( CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int jc, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Calculates the cooficients of the homography matrix */ +CVAPI(void) cvCalcImageHomography( float* line, CvPoint3D32f* center, + float* intrinsic, float* homography ); + +/****************************************************************************************\ +* More operations on sequences * +\****************************************************************************************/ + +/*****************************************************************************************/ + +#define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr)) +#define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem)) + +#define CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\ + float weight; + +#define CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS() + +typedef struct CvGraphWeightedVtx +{ + CV_GRAPH_WEIGHTED_VERTEX_FIELDS() +} CvGraphWeightedVtx; + +typedef struct CvGraphWeightedEdge +{ + CV_GRAPH_WEIGHTED_EDGE_FIELDS() +} 
CvGraphWeightedEdge; + +typedef enum CvGraphWeightType +{ + CV_NOT_WEIGHTED, + CV_WEIGHTED_VTX, + CV_WEIGHTED_EDGE, + CV_WEIGHTED_ALL +} CvGraphWeightType; + + +/* Calculates histogram of a contour */ +CVAPI(void) cvCalcPGH( const CvSeq* contour, CvHistogram* hist ); + +#define CV_DOMINANT_IPAN 1 + +/* Finds high-curvature points of the contour */ +CVAPI(CvSeq*) cvFindDominantPoints( CvSeq* contour, CvMemStorage* storage, + int method CV_DEFAULT(CV_DOMINANT_IPAN), + double parameter1 CV_DEFAULT(0), + double parameter2 CV_DEFAULT(0), + double parameter3 CV_DEFAULT(0), + double parameter4 CV_DEFAULT(0)); + +/*****************************************************************************************/ + + +/*******************************Stereo correspondence*************************************/ + +typedef struct CvCliqueFinder +{ + CvGraph* graph; + int** adj_matr; + int N; //graph size + + // stacks, counters etc/ + int k; //stack size + int* current_comp; + int** All; + + int* ne; + int* ce; + int* fixp; //node with minimal disconnections + int* nod; + int* s; //for selected candidate + int status; + int best_score; + int weighted; + int weighted_edges; + float best_weight; + float* edge_weights; + float* vertex_weights; + float* cur_weight; + float* cand_weight; + +} CvCliqueFinder; + +#define CLIQUE_TIME_OFF 2 +#define CLIQUE_FOUND 1 +#define CLIQUE_END 0 + +/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse, + int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0)); +CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) ); +CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder ); + +CVAPI(void) cvBronKerbosch( CvGraph* graph );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvSubgraphWeight +// Purpose: finds weight of subgraph in a graph +// Context: +// Parameters: +// graph - input graph. 
+// subgraph - sequence of pairwise different ints. These are indices of vertices of subgraph. +// weight_type - describes the way we measure weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// If this parameter is not zero structure of the graph is determined from matrix +// rather than from CvGraphEdge's. In particular, elements corresponding to +// absent edges should be zero. +// Returns: +// weight of subgraph. +// Notes: +//F*/ +/*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph, + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0) );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindCliqueEx +// Purpose: tries to find clique with maximum possible weight in a graph +// Context: +// Parameters: +// graph - input graph. +// storage - memory storage to be used by the result. +// is_complementary - optional flag showing whether function should seek for clique +// in complementary graph. 
+// weight_type - describes our notion about weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// nonzero is_complementary implies nonzero weight_edge. +// start_clique - optional sequence of pairwise different ints. They are indices of +// vertices that shall be present in the output clique. +// subgraph_of_ban - optional sequence of (maybe equal) ints. They are indices of +// vertices that shall not be present in the output clique. +// clique_weight_ptr - optional output parameter. Weight of found clique stored here. +// num_generations - optional number of generations in evolutionary part of algorithm, +// zero forces to return first found clique. +// quality - optional parameter determining degree of required quality/speed tradeoff. +// Must be in the range from 0 to 9. +// 0 is fast and dirty, 9 is slow but hopefully yields good clique. +// Returns: +// sequence of pairwise different ints. +// These are indices of vertices that form found clique. 
+// Notes: +// in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative. +// start_clique has a priority over subgraph_of_ban. +//F*/ +/*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage, + int is_complementary CV_DEFAULT(0), + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0), + CvSeq *start_clique CV_DEFAULT(0), + CvSeq *subgraph_of_ban CV_DEFAULT(0), + float *clique_weight_ptr CV_DEFAULT(0), + int num_generations CV_DEFAULT(3), + int quality CV_DEFAULT(2) );*/ + + +#define CV_UNDEF_SC_PARAM 12345 //default value of parameters + +#define CV_IDP_BIRCHFIELD_PARAM1 25 +#define CV_IDP_BIRCHFIELD_PARAM2 5 +#define CV_IDP_BIRCHFIELD_PARAM3 12 +#define CV_IDP_BIRCHFIELD_PARAM4 15 +#define CV_IDP_BIRCHFIELD_PARAM5 25 + + +#define CV_DISPARITY_BIRCHFIELD 0 + + +/*F/////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindStereoCorrespondence +// Purpose: find stereo correspondence on stereo-pair +// Context: +// Parameters: +// leftImage - left image of stereo-pair (format 8uC1). +// rightImage - right image of stereo-pair (format 8uC1). +// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only) +// dispImage - destination disparity image +// maxDisparity - maximal disparity +// param1, param2, param3, param4, param5 - parameters of algorithm +// Returns: +// Notes: +// Images must be rectified. +// All images must have format 8uC1. 
+//F*/ +CVAPI(void) +cvFindStereoCorrespondence( + const CvArr* leftImage, const CvArr* rightImage, + int mode, + CvArr* dispImage, + int maxDisparity, + double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) ); + +/*****************************************************************************************/ +/************ Epiline functions *******************/ + + + +typedef struct CvStereoLineCoeff +{ + double Xcoef; + double XcoefA; + double XcoefB; + double XcoefAB; + + double Ycoef; + double YcoefA; + double YcoefB; + double YcoefAB; + + double Zcoef; + double ZcoefA; + double ZcoefB; + double ZcoefAB; +}CvStereoLineCoeff; + + +typedef struct CvCamera +{ + float imgSize[2]; /* size of the camera view, used during calibration */ + float matrix[9]; /* intinsic camera parameters: [ fx 0 cx; 0 fy cy; 0 0 1 ] */ + float distortion[4]; /* distortion coefficients - two coefficients for radial distortion + and another two for tangential: [ k1 k2 p1 p2 ] */ + float rotMatr[9]; + float transVect[3]; /* rotation matrix and transition vector relatively + to some reference point in the space. 
*/ +} CvCamera; + +typedef struct CvStereoCamera +{ + CvCamera* camera[2]; /* two individual camera parameters */ + float fundMatr[9]; /* fundamental matrix */ + + /* New part for stereo */ + CvPoint3D32f epipole[2]; + CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after + epipolar geometry rectification */ + double coeffs[2][3][3];/* coefficients for transformation */ + CvPoint2D32f border[2][4]; + CvSize warpSize; + CvStereoLineCoeff* lineCoeffs; + int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */ + float rotMatrix[9]; + float transVector[3]; +} CvStereoCamera; + + +typedef struct CvContourOrientation +{ + float egvals[2]; + float egvects[4]; + + float max, min; // minimum and maximum projections + int imax, imin; +} CvContourOrientation; + +#define CV_CAMERA_TO_WARP 1 +#define CV_WARP_TO_CAMERA 2 + +CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3], + CvPoint2D32f* cameraPoint, + CvPoint2D32f* warpPoint, + int direction); + +CVAPI(int) icvGetSymPoint3D( CvPoint3D64f pointCorner, + CvPoint3D64f point1, + CvPoint3D64f point2, + CvPoint3D64f *pointSym2); + +CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist); + +CVAPI(int) icvCompute3DPoint( double alpha,double betta, + CvStereoLineCoeff* coeffs, + CvPoint3D64f* point); + +CVAPI(int) icvCreateConvertMatrVect( double* rotMatr1, + double* transVect1, + double* rotMatr2, + double* transVect2, + double* convRotMatr, + double* convTransVect); + +CVAPI(int) icvConvertPointSystem(CvPoint3D64f M2, + CvPoint3D64f* M1, + double* rotMatr, + double* transVect + ); + +CVAPI(int) icvComputeCoeffForStereo( CvStereoCamera* stereoCamera); + +CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross); +CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross); +CVAPI(float) 
icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point); +CVAPI(int) icvStereoCalibration( int numImages, + int* nums, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + CvStereoCamera* stereoparams + ); + + +CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams); + +CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY ); + +CVAPI(int) icvComCoeffForLine( CvPoint2D64f point1, + CvPoint2D64f point2, + CvPoint2D64f point3, + CvPoint2D64f point4, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvStereoLineCoeff* coeffs, + int* needSwapCameras); + +CVAPI(int) icvGetDirectionForPoint( CvPoint2D64f point, + double* camMatr, + CvPoint3D64f* direct); + +CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12, + CvPoint3D64f point21,CvPoint3D64f point22, + CvPoint3D64f* midPoint); + +CVAPI(int) icvComputeStereoLineCoeffs( CvPoint3D64f pointA, + CvPoint3D64f pointB, + CvPoint3D64f pointCam1, + double gamma, + CvStereoLineCoeff* coeffs); + +/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvPoint2D64f* epipole1, + CvPoint2D64f* epipole2, + double* fundMatr);*/ + +CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2); + +CVAPI(void) icvGetCoefForPiece( CvPoint2D64f p_start,CvPoint2D64f p_end, + double *a,double *b,double *c, + int* result); + +/*CVAPI(void) icvGetCommonArea( CvSize imageSize, + CvPoint2D64f epipole1,CvPoint2D64f epipole2, + double* fundMatr, + double* coeff11,double* coeff12, + double* coeff21,double* coeff22, + int* result);*/ + +CVAPI(void) icvComputeeInfiniteProject1(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f point1, + 
CvPoint2D32f *point2); + +CVAPI(void) icvComputeeInfiniteProject2(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f* point1, + CvPoint2D32f point2); + +CVAPI(void) icvGetCrossDirectDirect( double* direct1,double* direct2, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPieceDirect( CvPoint2D64f p_start,CvPoint2D64f p_end, + double a,double b,double c, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end, + CvPoint2D64f p2_start,CvPoint2D64f p2_end, + CvPoint2D64f* cross, + int* result); + +CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist); + +CVAPI(void) icvGetCrossRectDirect( CvSize imageSize, + double a,double b,double c, + CvPoint2D64f *start,CvPoint2D64f *end, + int* result); + +CVAPI(void) icvProjectPointToImage( CvPoint3D64f point, + double* camMatr,double* rotMatr,double* transVect, + CvPoint2D64f* projPoint); + +CVAPI(void) icvGetQuadsTransform( CvSize imageSize, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvSize* warpSize, + double quad1[4][2], + double quad2[4][2], + double* fundMatr, + CvPoint3D64f* epipole1, + CvPoint3D64f* epipole2 + ); + +CVAPI(void) icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera); + +CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera); + +CVAPI(void) icvGetCutPiece( double* areaLineCoef1,double* areaLineCoef2, + CvPoint2D64f epipole, + CvSize imageSize, + CvPoint2D64f* point11,CvPoint2D64f* point12, + CvPoint2D64f* point21,CvPoint2D64f* point22, + int* result); + +CVAPI(void) icvGetMiddleAnglePoint( CvPoint2D64f basePoint, + CvPoint2D64f point1,CvPoint2D64f point2, + CvPoint2D64f* midPoint); + +CVAPI(void) icvGetNormalDirect(double* direct,CvPoint2D64f point,double* normDirect); + +CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2); + +CVAPI(void) 
icvProjectPointToDirect( CvPoint2D64f point,double* lineCoeff, + CvPoint2D64f* projectPoint); + +CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,double* lineCoef,double*dist); + +CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst, + int desired_depth, int desired_num_channels ); + +CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd ); + +/*CVAPI(int) icvSelectBestRt( int numImages, + int* numPoints, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + + CvMatr32f cameraMatrix1, + CvVect32f distortion1, + CvMatr32f rotMatrs1, + CvVect32f transVects1, + + CvMatr32f cameraMatrix2, + CvVect32f distortion2, + CvMatr32f rotMatrs2, + CvVect32f transVects2, + + CvMatr32f bestRotMatr, + CvVect32f bestTransVect + );*/ + + +/****************************************************************************************\ +* Contour Tree * +\****************************************************************************************/ + +/* Contour tree header */ +typedef struct CvContourTree +{ + CV_SEQUENCE_FIELDS() + CvPoint p1; /* the first point of the binary tree root segment */ + CvPoint p2; /* the last point of the binary tree root segment */ +} CvContourTree; + +/* Builds hierarhical representation of a contour */ +CVAPI(CvContourTree*) cvCreateContourTree( const CvSeq* contour, + CvMemStorage* storage, + double threshold ); + +/* Reconstruct (completelly or partially) contour a from contour tree */ +CVAPI(CvSeq*) cvContourFromContourTree( const CvContourTree* tree, + CvMemStorage* storage, + CvTermCriteria criteria ); + +/* Compares two contour trees */ +enum { CV_CONTOUR_TREES_MATCH_I1 = 1 }; + +CVAPI(double) cvMatchContourTrees( const CvContourTree* tree1, + const CvContourTree* tree2, + int method, double threshold ); + +/****************************************************************************************\ +* Contour Morphing * 
+\****************************************************************************************/ + +/* finds correspondence between two contours */ +CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1, + const CvSeq* contour2, + CvMemStorage* storage); + +/* morphs contours using the pre-calculated correspondence: + alpha=0 ~ contour1, alpha=1 ~ contour2 */ +CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2, + CvSeq* corr, double alpha, + CvMemStorage* storage ); + + +/****************************************************************************************\ +* Active Contours * +\****************************************************************************************/ + +#define CV_VALUE 1 +#define CV_ARRAY 2 +/* Updates active contour in order to minimize its cummulative + (internal and external) energy. */ +CVAPI(void) cvSnakeImage( const IplImage* image, CvPoint* points, + int length, float* alpha, + float* beta, float* gamma, + int coeff_usage, CvSize win, + CvTermCriteria criteria, int calc_gradient CV_DEFAULT(1)); + +/****************************************************************************************\ +* Texture Descriptors * +\****************************************************************************************/ + +#define CV_GLCM_OPTIMIZATION_NONE -2 +#define CV_GLCM_OPTIMIZATION_LUT -1 +#define CV_GLCM_OPTIMIZATION_HISTOGRAM 0 + +#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10 +#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11 +#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4 + +#define CV_GLCMDESC_ENTROPY 0 +#define CV_GLCMDESC_ENERGY 1 +#define CV_GLCMDESC_HOMOGENITY 2 +#define CV_GLCMDESC_CONTRAST 3 +#define CV_GLCMDESC_CLUSTERTENDENCY 4 +#define CV_GLCMDESC_CLUSTERSHADE 5 +#define CV_GLCMDESC_CORRELATION 6 +#define CV_GLCMDESC_CORRELATIONINFO1 7 +#define CV_GLCMDESC_CORRELATIONINFO2 8 +#define CV_GLCMDESC_MAXIMUMPROBABILITY 9 + +#define CV_GLCM_ALL 0 +#define CV_GLCM_GLCM 1 +#define CV_GLCM_DESC 2 + +typedef struct CvGLCM 
CvGLCM; + +CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage, + int stepMagnitude, + const int* stepDirections CV_DEFAULT(0), + int numStepDirections CV_DEFAULT(0), + int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE)); + +CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL)); + +CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM, + int descriptorOptimizationType + CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST)); + +CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor ); + +CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor, + double* average, double* standardDeviation ); + +CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step ); + +/****************************************************************************************\ +* Face eyes&mouth tracking * +\****************************************************************************************/ + + +typedef struct CvFaceTracker CvFaceTracker; + +#define CV_NUM_FACE_ELEMENTS 3 +enum CV_FACE_ELEMENTS +{ + CV_FACE_MOUTH = 0, + CV_FACE_LEFT_EYE = 1, + CV_FACE_RIGHT_EYE = 2 +}; + +CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray, + CvRect* pRects, int nRects); +CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray, + CvRect* pRects, int nRects, + CvPoint* ptRotate, double* dbAngleRotate); +CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker); + + +typedef struct CvFace +{ + CvRect MouthRect; + CvRect LeftEyeRect; + CvRect RightEyeRect; +} CvFaceData; + +CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage); +CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage); + + +/****************************************************************************************\ +* 3D Tracker * +\****************************************************************************************/ + +typedef unsigned char CvBool; + +typedef struct Cv3dTracker2dTrackedObject 
+{ + int id; + CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float +} Cv3dTracker2dTrackedObject; + +CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p) +{ + Cv3dTracker2dTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerTrackedObject +{ + int id; + CvPoint3D32f p; // location of the tracked object +} Cv3dTrackerTrackedObject; + +CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p) +{ + Cv3dTrackerTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerCameraInfo +{ + CvBool valid; + float mat[4][4]; /* maps camera coordinates to world coordinates */ + CvPoint2D32f principal_point; /* copied from intrinsics so this structure */ + /* has all the info we need */ +} Cv3dTrackerCameraInfo; + +typedef struct Cv3dTrackerCameraIntrinsics +{ + CvPoint2D32f principal_point; + float focal_length[2]; + float distortion[4]; +} Cv3dTrackerCameraIntrinsics; + +CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras, + const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */ + CvSize etalon_size, + float square_size, + IplImage *samples[], /* size is num_cameras */ + Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */ + +CVAPI(int) cv3dTrackerLocateObjects(int num_cameras, int num_objects, + const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */ + const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */ + Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */ +/**************************************************************************************** + tracking_info is a rectangular array; one row per camera, num_objects elements per row. + The id field of any unused slots must be -1. Ids need not be ordered or consecutive. 
On + completion, the return value is the number of objects located; i.e., the number of objects + visible by more than one camera. The id field of any unused slots in tracked objects is + set to -1. +****************************************************************************************/ + + +/****************************************************************************************\ +* Skeletons and Linear-Contour Models * +\****************************************************************************************/ + +typedef enum CvLeeParameters +{ + CV_LEE_INT = 0, + CV_LEE_FLOAT = 1, + CV_LEE_DOUBLE = 2, + CV_LEE_AUTO = -1, + CV_LEE_ERODE = 0, + CV_LEE_ZOOM = 1, + CV_LEE_NON = 2 +} CvLeeParameters; + +#define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))]) +#define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))]) +#define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0]) +#define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1]) +#define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)]) +#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))]) +#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))]) + +#define CV_VORONOISITE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiEdge2D *edge[2]; + +typedef struct CvVoronoiSite2D +{ + CV_VORONOISITE2D_FIELDS() + struct CvVoronoiSite2D *next[2]; +} CvVoronoiSite2D; + +#define CV_VORONOIEDGE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiSite2D *site[2]; \ + struct CvVoronoiEdge2D *next[4]; + +typedef struct CvVoronoiEdge2D +{ + CV_VORONOIEDGE2D_FIELDS() +} CvVoronoiEdge2D; + +#define CV_VORONOINODE2D_FIELDS() \ + 
CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \ + CvPoint2D32f pt; \ + float radius; + +typedef struct CvVoronoiNode2D +{ + CV_VORONOINODE2D_FIELDS() +} CvVoronoiNode2D; + +#define CV_VORONOIDIAGRAM2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + CvSet *sites; + +typedef struct CvVoronoiDiagram2D +{ + CV_VORONOIDIAGRAM2D_FIELDS() +} CvVoronoiDiagram2D; + +/* Computes Voronoi Diagram for given polygons with holes */ +CVAPI(int) cvVoronoiDiagramFromContour(CvSeq* ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT), + int contour_orientation CV_DEFAULT(-1), + int attempt_number CV_DEFAULT(10)); + +/* Computes Voronoi Diagram for domains in given image */ +CVAPI(int) cvVoronoiDiagramFromImage(IplImage* pImage, + CvSeq** ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON), + float approx_precision CV_DEFAULT(CV_LEE_AUTO)); + +/* Deallocates the storage */ +CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram, + CvMemStorage** pVoronoiStorage); + +/*********************** Linear-Contour Model ****************************/ + +struct CvLCMEdge; +struct CvLCMNode; + +typedef struct CvLCMEdge +{ + CV_GRAPH_EDGE_FIELDS() + CvSeq* chain; + float width; + int index1; + int index2; +} CvLCMEdge; + +typedef struct CvLCMNode +{ + CV_GRAPH_VERTEX_FIELDS() + CvContour* contour; +} CvLCMNode; + + +/* Computes hybrid model from Voronoi Diagram */ +CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram, + float maxWidth); + +/* Releases hybrid model storage */ +CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph); + + +/* two stereo-related functions */ + +CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3], + CvArr* rectMap ); + +/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params, + CvArr* rectMap1, CvArr* 
rectMap2, + int do_undistortion );*/ + +/*************************** View Morphing Functions ************************/ + +typedef struct CvMatrix3 +{ + float m[3][3]; +} CvMatrix3; + +/* The order of the function corresponds to the order they should appear in + the view morphing pipeline */ + +/* Finds ending points of scanlines on left and right images of stereo-pair */ +CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size, + int* scanlines1, int* scanlines2, + int* lengths1, int* lengths2, + int* line_count ); + +/* Grab pixel values from scanlines and stores them sequentially + (some sort of perspective image transform) */ +CVAPI(void) cvPreWarpImage( int line_count, + IplImage* img, + uchar* dst, + int* dst_nums, + int* scanlines); + +/* Approximate each grabbed scanline by a sequence of runs + (lossy run-length compression) */ +CVAPI(void) cvFindRuns( int line_count, + uchar* prewarp1, + uchar* prewarp2, + int* line_lengths1, + int* line_lengths2, + int* runs1, + int* runs2, + int* num_runs1, + int* num_runs2); + +/* Compares two sets of compressed scanlines */ +CVAPI(void) cvDynamicCorrespondMulti( int line_count, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Finds scanline ending coordinates for some intermediate "virtual" camera position */ +CVAPI(void) cvMakeAlphaScanlines( int* scanlines1, + int* scanlines2, + int* scanlinesA, + int* lengths, + int line_count, + float alpha); + +/* Blends data of the left and right image scanlines to get + pixel values of "virtual" image scanlines */ +CVAPI(void) cvMorphEpilinesMulti( int line_count, + uchar* first_pix, + int* first_num, + uchar* second_pix, + int* second_num, + uchar* dst_pix, + int* dst_num, + float alpha, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Does reverse warping of the morphing result to make + it fill the destination image rectangle */ 
+CVAPI(void) cvPostWarpImage( int line_count, + uchar* src, + int* src_nums, + IplImage* img, + int* scanlines); + +/* Deletes Moire (missed pixels that appear due to discretization) */ +CVAPI(void) cvDeleteMoire( IplImage* img ); + + +typedef struct CvConDensation +{ + int MP; + int DP; + float* DynamMatr; /* Matrix of the linear Dynamics system */ + float* State; /* Vector of State */ + int SamplesNum; /* Number of the Samples */ + float** flSamples; /* arr of the Sample Vectors */ + float** flNewSamples; /* temporary array of the Sample Vectors */ + float* flConfidence; /* Confidence for each Sample */ + float* flCumulative; /* Cumulative confidence */ + float* Temp; /* Temporary vector */ + float* RandomSample; /* RandomVector to update sample set */ + struct CvRandState* RandS; /* Array of structures to generate random vectors */ +} CvConDensation; + +/* Creates ConDensation filter state */ +CVAPI(CvConDensation*) cvCreateConDensation( int dynam_params, + int measure_params, + int sample_count ); + +/* Releases ConDensation filter state */ +CVAPI(void) cvReleaseConDensation( CvConDensation** condens ); + +/* Updates ConDensation filter by time (predict future state of the system) */ +CVAPI(void) cvConDensUpdateByTime( CvConDensation* condens); + +/* Initializes ConDensation filter samples */ +CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound ); + +CV_INLINE int iplWidth( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? img->width : img->roi->width; +} + +CV_INLINE int iplHeight( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? 
img->height : img->roi->height; +} + +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus + +/****************************************************************************************\ +* Calibration engine * +\****************************************************************************************/ + +typedef enum CvCalibEtalonType +{ + CV_CALIB_ETALON_USER = -1, + CV_CALIB_ETALON_CHESSBOARD = 0, + CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD +} +CvCalibEtalonType; + +class CV_EXPORTS CvCalibFilter +{ +public: + /* Constructor & destructor */ + CvCalibFilter(); + virtual ~CvCalibFilter(); + + /* Sets etalon type - one for all cameras. + etalonParams is used in case of pre-defined etalons (such as chessboard). + Number of elements in etalonParams is determined by etalonType. + E.g., if etalon type is CV_ETALON_TYPE_CHESSBOARD then: + etalonParams[0] is number of squares per one side of etalon + etalonParams[1] is number of squares per another side of etalon + etalonParams[2] is linear size of squares in the board in arbitrary units. + pointCount & points are used in case of + CV_CALIB_ETALON_USER (user-defined) etalon. */ + virtual bool + SetEtalon( CvCalibEtalonType etalonType, double* etalonParams, + int pointCount = 0, CvPoint2D32f* points = 0 ); + + /* Retrieves etalon parameters/or and points */ + virtual CvCalibEtalonType + GetEtalon( int* paramCount = 0, const double** etalonParams = 0, + int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const; + + /* Sets number of cameras calibrated simultaneously. 
It is equal to 1 initially */ + virtual void SetCameraCount( int cameraCount ); + + /* Retrieves number of cameras */ + int GetCameraCount() const { return cameraCount; } + + /* Starts cameras calibration */ + virtual bool SetFrames( int totalFrames ); + + /* Stops cameras calibration */ + virtual void Stop( bool calibrate = false ); + + /* Retrieves number of cameras */ + bool IsCalibrated() const { return isCalibrated; } + + /* Feeds another serie of snapshots (one per each camera) to filter. + Etalon points on these images are found automatically. + If the function can't locate points, it returns false */ + virtual bool FindEtalon( IplImage** imgs ); + + /* The same but takes matrices */ + virtual bool FindEtalon( CvMat** imgs ); + + /* Lower-level function for feeding filter with already found etalon points. + Array of point arrays for each camera is passed. */ + virtual bool Push( const CvPoint2D32f** points = 0 ); + + /* Returns total number of accepted frames and, optionally, + total number of frames to collect */ + virtual int GetFrameCount( int* framesTotal = 0 ) const; + + /* Retrieves camera parameters for specified camera. + If camera is not calibrated the function returns 0 */ + virtual const CvCamera* GetCameraParams( int idx = 0 ) const; + + virtual const CvStereoCamera* GetStereoParams() const; + + /* Sets camera parameters for all cameras */ + virtual bool SetCameraParams( CvCamera* params ); + + /* Saves all camera parameters to file */ + virtual bool SaveCameraParams( const char* filename ); + + /* Loads all camera parameters from file */ + virtual bool LoadCameraParams( const char* filename ); + + /* Undistorts images using camera parameters. Some of src pointers can be NULL. */ + virtual bool Undistort( IplImage** src, IplImage** dst ); + + /* Undistorts images using camera parameters. Some of src pointers can be NULL. 
*/ + virtual bool Undistort( CvMat** src, CvMat** dst ); + + /* Returns array of etalon points detected/partally detected + on the latest frame for idx-th camera */ + virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts, + int* count, bool* found ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( IplImage** dst ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( CvMat** dst ); + + virtual bool Rectify( IplImage** srcarr, IplImage** dstarr ); + virtual bool Rectify( CvMat** srcarr, CvMat** dstarr ); + +protected: + + enum { MAX_CAMERAS = 3 }; + + /* etalon data */ + CvCalibEtalonType etalonType; + int etalonParamCount; + double* etalonParams; + int etalonPointCount; + CvPoint2D32f* etalonPoints; + CvSize imgSize; + CvMat* grayImg; + CvMat* tempImg; + CvMemStorage* storage; + + /* camera data */ + int cameraCount; + CvCamera cameraParams[MAX_CAMERAS]; + CvStereoCamera stereo; + CvPoint2D32f* points[MAX_CAMERAS]; + CvMat* undistMap[MAX_CAMERAS][2]; + CvMat* undistImg; + int latestCounts[MAX_CAMERAS]; + CvPoint2D32f* latestPoints[MAX_CAMERAS]; + CvMat* rectMap[MAX_CAMERAS][2]; + + /* Added by Valery */ + //CvStereoCamera stereoParams; + + int maxPoints; + int framesTotal; + int framesAccepted; + bool isCalibrated; +}; + +#include +#include + +class CV_EXPORTS CvImage +{ +public: + CvImage() : image(0), refcount(0) {} + CvImage( CvSize _size, int _depth, int _channels ) + { + image = cvCreateImage( _size, _depth, _channels ); + refcount = image ? new int(1) : 0; + } + + CvImage( IplImage* img ) : image(img) + { + refcount = image ? 
new int(1) : 0; + } + + CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount) + { + if( refcount ) ++(*refcount); + } + + CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0) + { load( filename, imgname, color ); } + + CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0) + { read( fs, mapname, imgname ); } + + CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0) + { read( fs, seqname, idx ); } + + ~CvImage() + { + if( refcount && !(--*refcount) ) + { + cvReleaseImage( &image ); + delete refcount; + } + } + + CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); } + + void create( CvSize _size, int _depth, int _channels ) + { + if( !image || !refcount || + image->width != _size.width || image->height != _size.height || + image->depth != _depth || image->nChannels != _channels ) + attach( cvCreateImage( _size, _depth, _channels )); + } + + void release() { detach(); } + void clear() { detach(); } + + void attach( IplImage* img, bool use_refcount=true ) + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = img; + refcount = use_refcount && image ? new int(1) : 0; + } + + void detach() + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = 0; + refcount = 0; + } + + bool load( const char* filename, const char* imgname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* imgname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* imgname, const int* params=0 ); + void write( CvFileStorage* fs, const char* imgname ); + + void show( const char* window_name ); + bool is_valid() { return image != 0; } + + int width() const { return image ? image->width : 0; } + int height() const { return image ? 
image->height : 0; } + + CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); } + + CvSize roi_size() const + { + return !image ? cvSize(0,0) : + !image->roi ? cvSize(image->width,image->height) : + cvSize(image->roi->width, image->roi->height); + } + + CvRect roi() const + { + return !image ? cvRect(0,0,0,0) : + !image->roi ? cvRect(0,0,image->width,image->height) : + cvRect(image->roi->xOffset,image->roi->yOffset, + image->roi->width,image->roi->height); + } + + int coi() const { return !image || !image->roi ? 0 : image->roi->coi; } + + void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); } + void reset_roi() { cvResetImageROI(image); } + void set_coi(int _coi) { cvSetImageCOI(image,_coi); } + int depth() const { return image ? image->depth : 0; } + int channels() const { return image ? image->nChannels : 0; } + int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; } + + uchar* data() { return image ? (uchar*)image->imageData : 0; } + const uchar* data() const { return image ? (const uchar*)image->imageData : 0; } + int step() const { return image ? image->widthStep : 0; } + int origin() const { return image ? image->origin : 0; } + + uchar* roi_row(int y) + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? + (uchar*)(image->imageData + y*image->widthStep) : + (uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + const uchar* roi_row(int y) const + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? 
+ (const uchar*)(image->imageData + y*image->widthStep) : + (const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + operator const IplImage* () const { return image; } + operator IplImage* () { return image; } + + CvImage& operator = (const CvImage& img) + { + if( img.refcount ) + ++*img.refcount; + if( refcount && !(--*refcount) ) + cvReleaseImage( &image ); + image=img.image; + refcount=img.refcount; + return *this; + } + +protected: + IplImage* image; + int* refcount; +}; + + +class CV_EXPORTS CvMatrix +{ +public: + CvMatrix() : matrix(0) {} + CvMatrix( int _rows, int _cols, int _type ) + { matrix = cvCreateMat( _rows, _cols, _type ); } + + CvMatrix( int _rows, int _cols, int _type, CvMat* hdr, + void* _data=0, int _step=CV_AUTOSTEP ) + { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); } + + CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true ); + + CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP ) + { matrix = cvCreateMatHeader( _rows, _cols, _type ); + cvSetData( matrix, _data, _step ); } + + CvMatrix( CvMat* m ) + { matrix = m; } + + CvMatrix( const CvMatrix& m ) + { + matrix = m.matrix; + addref(); + } + + CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0) + { load( filename, matname, color ); } + + CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0) + { read( fs, mapname, matname ); } + + CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0) + { read( fs, seqname, idx ); } + + ~CvMatrix() + { + release(); + } + + CvMatrix clone() { return CvMatrix(matrix ? 
cvCloneMat(matrix) : 0); } + + void set( CvMat* m, bool add_ref ) + { + release(); + matrix = m; + if( add_ref ) + addref(); + } + + void create( int _rows, int _cols, int _type ) + { + if( !matrix || !matrix->refcount || + matrix->rows != _rows || matrix->cols != _cols || + CV_MAT_TYPE(matrix->type) != _type ) + set( cvCreateMat( _rows, _cols, _type ), false ); + } + + void addref() const + { + if( matrix ) + { + if( matrix->hdr_refcount ) + ++matrix->hdr_refcount; + else if( matrix->refcount ) + ++*matrix->refcount; + } + } + + void release() + { + if( matrix ) + { + if( matrix->hdr_refcount ) + { + if( --matrix->hdr_refcount == 0 ) + cvReleaseMat( &matrix ); + } + else if( matrix->refcount ) + { + if( --*matrix->refcount == 0 ) + cvFree( &matrix->refcount ); + } + matrix = 0; + } + } + + void clear() + { + release(); + } + + bool load( const char* filename, const char* matname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* matname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* matname, const int* params=0 ); + void write( CvFileStorage* fs, const char* matname ); + + void show( const char* window_name ); + + bool is_valid() { return matrix != 0; } + + int rows() const { return matrix ? matrix->rows : 0; } + int cols() const { return matrix ? matrix->cols : 0; } + + CvSize size() const + { + return !matrix ? cvSize(0,0) : cvSize(matrix->rows,matrix->cols); + } + + int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; } + int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; } + int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; } + int pix_size() const { return matrix ? CV_ELEM_SIZE(matrix->type) : 0; } + + uchar* data() { return matrix ? matrix->data.ptr : 0; } + const uchar* data() const { return matrix ? matrix->data.ptr : 0; } + int step() const { return matrix ? 
matrix->step : 0; } + + void set_data( void* _data, int _step=CV_AUTOSTEP ) + { cvSetData( matrix, _data, _step ); } + + uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + const uchar* row(int i) const + { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + + operator const CvMat* () const { return matrix; } + operator CvMat* () { return matrix; } + + CvMatrix& operator = (const CvMatrix& _m) + { + _m.addref(); + release(); + matrix = _m.matrix; + return *this; + } + +protected: + CvMat* matrix; +}; + +/****************************************************************************************\ + * CamShiftTracker * + \****************************************************************************************/ + +class CV_EXPORTS CvCamShiftTracker +{ +public: + + CvCamShiftTracker(); + virtual ~CvCamShiftTracker(); + + /**** Characteristics of the object that are calculated by track_object method *****/ + float get_orientation() const // orientation of the object in degrees + { return m_box.angle; } + float get_length() const // the larger linear size of the object + { return m_box.size.height; } + float get_width() const // the smaller linear size of the object + { return m_box.size.width; } + CvPoint2D32f get_center() const // center of the object + { return m_box.center; } + CvRect get_window() const // bounding rectangle for the object + { return m_comp.rect; } + + /*********************** Tracking parameters ************************/ + int get_threshold() const // thresholding value that applied to back project + { return m_threshold; } + + int get_hist_dims( int* dims = 0 ) const // returns number of histogram dimensions and sets + { return m_hist ? 
cvGetDims( m_hist->bins, dims ) : 0; } + + int get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel + { return m_min_ch_val[channel]; } + + int get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel + { return m_max_ch_val[channel]; } + + // set initial object rectangle (must be called before initial calculation of the histogram) + bool set_window( CvRect window) + { m_comp.rect = window; return true; } + + bool set_threshold( int threshold ) // threshold applied to the histogram bins + { m_threshold = threshold; return true; } + + bool set_hist_bin_range( int dim, int min_val, int max_val ); + + bool set_hist_dims( int c_dims, int* dims );// set the histogram parameters + + bool set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel + { m_min_ch_val[channel] = val; return true; } + bool set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel + { m_max_ch_val[channel] = val; return true; } + + /************************ The processing methods *********************************/ + // update object position + virtual bool track_object( const IplImage* cur_frame ); + + // update object histogram + virtual bool update_histogram( const IplImage* cur_frame ); + + // reset histogram + virtual void reset_histogram(); + + /************************ Retrieving internal data *******************************/ + // get back project image + virtual IplImage* get_back_project() + { return m_back_project; } + + float query( int* bin ) const + { return m_hist ? 
(float)cvGetRealND(m_hist->bins, bin) : 0.f; } + +protected: + + // internal method for color conversion: fills m_color_planes group + virtual void color_transform( const IplImage* img ); + + CvHistogram* m_hist; + + CvBox2D m_box; + CvConnectedComp m_comp; + + float m_hist_ranges_data[CV_MAX_DIM][2]; + float* m_hist_ranges[CV_MAX_DIM]; + + int m_min_ch_val[CV_MAX_DIM]; + int m_max_ch_val[CV_MAX_DIM]; + int m_threshold; + + IplImage* m_color_planes[CV_MAX_DIM]; + IplImage* m_back_project; + IplImage* m_temp; + IplImage* m_mask; +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +struct CV_EXPORTS_W_MAP CvEMParams +{ + CvEMParams(); + CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL, + int start_step=cv::EM::START_AUTO_STEP, + CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), + const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 ); + + CV_PROP_RW int nclusters; + CV_PROP_RW int cov_mat_type; + CV_PROP_RW int start_step; + const CvMat* probs; + const CvMat* weights; + const CvMat* means; + const CvMat** covs; + CV_PROP_RW CvTermCriteria term_crit; +}; + + +class CV_EXPORTS_W CvEM : public CvStatModel +{ +public: + // Type of covariation matrices + enum { COV_MAT_SPHERICAL=cv::EM::COV_MAT_SPHERICAL, + COV_MAT_DIAGONAL =cv::EM::COV_MAT_DIAGONAL, + COV_MAT_GENERIC =cv::EM::COV_MAT_GENERIC }; + + // The initial step + enum { START_E_STEP=cv::EM::START_E_STEP, + START_M_STEP=cv::EM::START_M_STEP, + START_AUTO_STEP=cv::EM::START_AUTO_STEP }; + + CV_WRAP CvEM(); + CvEM( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* labels=0 ); + + virtual ~CvEM(); + + virtual bool train( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* 
labels=0 ); + + virtual float predict( const CvMat* sample, CV_OUT CvMat* probs ) const; + + CV_WRAP CvEM( const cv::Mat& samples, const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& samples, + const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams(), + CV_OUT cv::Mat* labels=0 ); + + CV_WRAP virtual float predict( const cv::Mat& sample, CV_OUT cv::Mat* probs=0 ) const; + CV_WRAP virtual double calcLikelihood( const cv::Mat &sample ) const; + + CV_WRAP int getNClusters() const; + CV_WRAP cv::Mat getMeans() const; + CV_WRAP void getCovs(CV_OUT std::vector& covs) const; + CV_WRAP cv::Mat getWeights() const; + CV_WRAP cv::Mat getProbs() const; + + CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; } + + CV_WRAP virtual void clear(); + + int get_nclusters() const; + const CvMat* get_means() const; + const CvMat** get_covs() const; + const CvMat* get_weights() const; + const CvMat* get_probs() const; + + inline double get_log_likelihood() const { return getLikelihood(); } + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + +protected: + void set_mat_hdrs(); + + cv::EM emObj; + cv::Mat probs; + double logLikelihood; + + CvMat meansHdr; + std::vector covsHdrs; + std::vector covsPtrs; + CvMat weightsHdr; + CvMat probsHdr; +}; + +namespace cv +{ + +typedef CvEMParams EMParams; +typedef CvEM ExpectationMaximization; + +/*! 
+ The Patch Generator class + */ +class CV_EXPORTS PatchGenerator +{ +public: + PatchGenerator(); + PatchGenerator(double _backgroundMin, double _backgroundMax, + double _noiseRange, bool _randomBlur=true, + double _lambdaMin=0.6, double _lambdaMax=1.5, + double _thetaMin=-CV_PI, double _thetaMax=CV_PI, + double _phiMin=-CV_PI, double _phiMax=CV_PI ); + void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const; + void operator()(const Mat& image, const Mat& transform, Mat& patch, + Size patchSize, RNG& rng) const; + void warpWholeImage(const Mat& image, Mat& matT, Mat& buf, + CV_OUT Mat& warped, int border, RNG& rng) const; + void generateRandomTransform(Point2f srcCenter, Point2f dstCenter, + CV_OUT Mat& transform, RNG& rng, + bool inverse=false) const; + void setAffineParam(double lambda, double theta, double phi); + + double backgroundMin, backgroundMax; + double noiseRange; + bool randomBlur; + double lambdaMin, lambdaMax; + double thetaMin, thetaMax; + double phiMin, phiMax; +}; + + +class CV_EXPORTS LDetector +{ +public: + LDetector(); + LDetector(int _radius, int _threshold, int _nOctaves, + int _nViews, double _baseFeatureSize, double _clusteringDistance); + void operator()(const Mat& image, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void operator()(const vector& pyr, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void getMostStable2D(const Mat& image, CV_OUT vector& keypoints, + int maxCount, const PatchGenerator& patchGenerator) const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + + int radius; + int threshold; + int nOctaves; + int nViews; + bool verbose; + + double baseFeatureSize; + double clusteringDistance; +}; + +typedef LDetector YAPE; + +class CV_EXPORTS FernClassifier +{ +public: + FernClassifier(); + FernClassifier(const FileNode& node); + 
FernClassifier(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~FernClassifier(); + virtual void read(const FileNode& n); + virtual void write(FileStorage& fs, const String& name=String()) const; + virtual void trainFromSingleView(const Mat& image, + const vector& keypoints, + int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual int operator()(const Mat& img, Point2f kpt, vector& signature) const; + virtual int operator()(const Mat& patch, vector& signature) const; + virtual void clear(); + virtual bool empty() const; + void setVerbose(bool verbose); + + int getClassCount() const; + int getStructCount() const; + int getStructSize() const; + int getSignatureSize() const; + int getCompressionMethod() const; + Size getPatchSize() const; + + struct Feature + { + uchar x1, y1, x2, y2; + Feature() : x1(0), y1(0), x2(0), y2(0) {} + Feature(int _x1, int _y1, int _x2, int _y2) + : x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2) + {} + template bool operator ()(const Mat_<_Tp>& patch) const + { return 
patch(y1,x1) > patch(y2, x2); } + }; + + enum + { + PATCH_SIZE = 31, + DEFAULT_STRUCTS = 50, + DEFAULT_STRUCT_SIZE = 9, + DEFAULT_VIEWS = 5000, + DEFAULT_SIGNATURE_SIZE = 176, + COMPRESSION_NONE = 0, + COMPRESSION_RANDOM_PROJ = 1, + COMPRESSION_PCA = 2, + DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE + }; + +protected: + virtual void prepare(int _nclasses, int _patchSize, int _signatureSize, + int _nstructs, int _structSize, + int _nviews, int _compressionMethod); + virtual void finalize(RNG& rng); + virtual int getLeaf(int fidx, const Mat& patch) const; + + bool verbose; + int nstructs; + int structSize; + int nclasses; + int signatureSize; + int compressionMethod; + int leavesPerStruct; + Size patchSize; + vector features; + vector classCounters; + vector posteriors; +}; + + +/****************************************************************************************\ + * Calonder Classifier * + \****************************************************************************************/ + +struct RTreeNode; + +struct CV_EXPORTS BaseKeypoint +{ + int x; + int y; + IplImage* image; + + BaseKeypoint() + : x(0), y(0), image(NULL) + {} + + BaseKeypoint(int _x, int _y, IplImage* _image) + : x(_x), y(_y), image(_image) + {} +}; + +class CV_EXPORTS RandomizedTree +{ +public: + friend class RTreeClassifier; + + static const uchar PATCH_SIZE = 32; + static const int DEFAULT_DEPTH = 9; + static const int DEFAULT_VIEWS = 5000; + static const size_t DEFAULT_REDUCED_NUM_DIM = 176; + static float GET_LOWER_QUANT_PERC() { return .03f; } + static float GET_UPPER_QUANT_PERC() { return .92f; } + + RandomizedTree(); + ~RandomizedTree(); + + void train(vector const& base_set, RNG &rng, + int depth, int views, size_t reduced_num_dim, int num_quant_bits); + void train(vector const& base_set, RNG &rng, + PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim, + int num_quant_bits); + + // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do) 
+ static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0); + static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst); + + // patch_data must be a 32x32 array (no row padding) + float* getPosterior(uchar* patch_data); + const float* getPosterior(uchar* patch_data) const; + uchar* getPosterior2(uchar* patch_data); + const uchar* getPosterior2(uchar* patch_data) const; + + void read(const char* file_name, int num_quant_bits); + void read(std::istream &is, int num_quant_bits); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + int classes() { return classes_; } + int depth() { return depth_; } + + //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; } + void discardFloatPosteriors() { freePosteriors(1); } + + inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); } + + // debug + void savePosteriors(std::string url, bool append=false); + void savePosteriors2(std::string url, bool append=false); + +private: + int classes_; + int depth_; + int num_leaves_; + vector nodes_; + float **posteriors_; // 16-bytes aligned posteriors + uchar **posteriors2_; // 16-bytes aligned posteriors + vector leaf_counts_; + + void createNodes(int num_nodes, RNG &rng); + void allocPosteriorsAligned(int num_leaves, int num_classes); + void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both + void init(int classes, int depth, RNG &rng); + void addExample(int class_id, uchar* patch_data); + void finalize(size_t reduced_num_dim, int num_quant_bits); + int getIndex(uchar* patch_data) const; + inline float* getPosteriorByIndex(int index); + inline const float* getPosteriorByIndex(int index) const; + inline uchar* getPosteriorByIndex2(int index); + inline const uchar* getPosteriorByIndex2(int index) const; + //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim); + void convertPosteriorsToChar(); + void 
makePosteriors2(int num_quant_bits); + void compressLeaves(size_t reduced_num_dim); + void estimateQuantPercForPosteriors(float perc[2]); +}; + + +inline uchar* getData(IplImage* image) +{ + return reinterpret_cast(image->imageData); +} + +inline float* RandomizedTree::getPosteriorByIndex(int index) +{ + return const_cast(const_cast(this)->getPosteriorByIndex(index)); +} + +inline const float* RandomizedTree::getPosteriorByIndex(int index) const +{ + return posteriors_[index]; +} + +inline uchar* RandomizedTree::getPosteriorByIndex2(int index) +{ + return const_cast(const_cast(this)->getPosteriorByIndex2(index)); +} + +inline const uchar* RandomizedTree::getPosteriorByIndex2(int index) const +{ + return posteriors2_[index]; +} + +struct CV_EXPORTS RTreeNode +{ + short offset1, offset2; + + RTreeNode() {} + RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2) + : offset1(y1*RandomizedTree::PATCH_SIZE + x1), + offset2(y2*RandomizedTree::PATCH_SIZE + x2) + {} + + //! Left child on 0, right child on 1 + inline bool operator() (uchar* patch_data) const + { + return patch_data[offset1] > patch_data[offset2]; + } +}; + +class CV_EXPORTS RTreeClassifier +{ +public: + static const int DEFAULT_TREES = 48; + static const size_t DEFAULT_NUM_QUANT_BITS = 4; + + RTreeClassifier(); + void train(vector const& base_set, + RNG &rng, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + void train(vector const& base_set, + RNG &rng, + PatchGenerator &make_patch, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + + // sig must point to a memory block of at least 
classes()*sizeof(float|uchar) bytes + void getSignature(IplImage *patch, uchar *sig) const; + void getSignature(IplImage *patch, float *sig) const; + void getSparseSignature(IplImage *patch, float *sig, float thresh) const; + // TODO: deprecated in favor of getSignature overload, remove + void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); } + + static int countNonZeroElements(float *vec, int n, double tol=1e-10); + static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176); + static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176); + + inline int classes() const { return classes_; } + inline int original_num_classes() const { return original_num_classes_; } + + void setQuantization(int num_quant_bits); + void discardFloatPosteriors(); + + void read(const char* file_name); + void read(std::istream &is); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + // experimental and debug + void saveAllFloatPosteriors(std::string file_url); + void saveAllBytePosteriors(std::string file_url); + void setFloatPosteriorsFromTextfile_176(std::string url); + float countZeroElements(); + + vector trees_; + +private: + int classes_; + int num_quant_bits_; + mutable uchar **posteriors_; + mutable unsigned short *ptemp_; + int original_num_classes_; + bool keep_floats_; +}; + +/****************************************************************************************\ +* One-Way Descriptor * +\****************************************************************************************/ + +// CvAffinePose: defines a parameterized affine transformation of an image patch. +// An image patch is rotated on angle phi (in degrees), then scaled lambda1 times +// along horizontal and lambda2 times along vertical direction, and then rotated again +// on angle (theta - phi). 
+class CV_EXPORTS CvAffinePose +{ +public: + float phi; + float theta; + float lambda1; + float lambda2; +}; + +class CV_EXPORTS OneWayDescriptor +{ +public: + OneWayDescriptor(); + ~OneWayDescriptor(); + + // allocates memory for given descriptor parameters + void Allocate(int pose_count, CvSize size, int nChannels); + + // GenerateSamples: generates affine transformed patches with averaging them over small transformation variations. + // If external poses and transforms were specified, uses them instead of generating random ones + // - pose_count: the number of poses to be generated + // - frontal: the input patch (can be a roi in a larger image) + // - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1 + void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0); + + // GenerateSamplesFast: generates affine transformed patches with averaging them over small transformation variations. + // Uses precalculated transformed pca components. + // - frontal: the input patch (can be a roi in a larger image) + // - pca_hr_avg: pca average vector + // - pca_hr_eigenvectors: pca eigenvectors + // - pca_descriptors: an array of precomputed descriptors of pca components containing their affine transformations + // pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors + void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg, + CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // sets the poses and corresponding transforms + void SetTransforms(CvAffinePose* poses, CvMat** transforms); + + // Initialize: builds a descriptor. + // - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones + // - frontal: input patch. 
Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - norm: if 1, the affine transformed patches are normalized so that their sum is 1 + void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0); + + // InitializeFast: builds a descriptor using precomputed descriptors of pca components + // - pose_count: the number of poses to build + // - frontal: input patch. Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - pca_hr_avg: average vector for PCA + // - pca_hr_eigenvectors: PCA eigenvectors (one vector per row) + // - pca_descriptors: precomputed descriptors of PCA components, the first descriptor for the average vector + // followed by the descriptors for eigenvectors + void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name, + CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space + // - patch: input image patch + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + // - pca_coeffs: output PCA coefficients + void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const; + + // InitializePCACoeffs: projects all warped patches into PCA space + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors); + + // EstimatePose: finds the closest match between an input patch and a set of patches with different poses + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: the distance to the closest pose (L2 distance) + void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const; + + // EstimatePosePCA: finds the closest match between an input patch and a set of patches with 
different poses. + // The distance between patches is computed in PCA space + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: distance to the closest pose (L2 distance in PCA space) + // - avg: PCA average vector. If 0, matching without PCA is used + // - eigenvectors: PCA eigenvectors, one per row + void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const; + + // GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch) + CvSize GetPatchSize() const + { + return m_patch_size; + } + + // GetInputPatchSize: returns the required size of the patch that the descriptor is built from + // (2 time larger than the patch after warping) + CvSize GetInputPatchSize() const + { + return cvSize(m_patch_size.width*2, m_patch_size.height*2); + } + + // GetPatch: returns a patch corresponding to specified pose index + // - index: pose index + // - return value: the patch corresponding to specified pose index + IplImage* GetPatch(int index); + + // GetPose: returns a pose corresponding to specified pose index + // - index: pose index + // - return value: the pose corresponding to specified pose index + CvAffinePose GetPose(int index) const; + + // Save: saves all patches with different poses to a specified path + void Save(const char* path); + + // ReadByName: reads a descriptor from a file storage + // - fs: file storage + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name); + + // ReadByName: reads a descriptor from a file node + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(const FileNode &parent, const char* name); + + // Write: writes a descriptor into a file storage + // - fs: file storage + // - name: node name + void Write(CvFileStorage* fs, const 
char* name); + + // GetFeatureName: returns a name corresponding to a feature + const char* GetFeatureName() const; + + // GetCenter: returns the center of the feature + CvPoint GetCenter() const; + + void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;}; + void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;}; + + int GetPCADimLow() const; + int GetPCADimHigh() const; + + CvMat** GetPCACoeffs() const {return m_pca_coeffs;} + +protected: + int m_pose_count; // the number of poses + CvSize m_patch_size; // size of each image + IplImage** m_samples; // an array of length m_pose_count containing the patch in different poses + IplImage* m_input_patch; + IplImage* m_train_patch; + CvMat** m_pca_coeffs; // an array of length m_pose_count containing pca decomposition of the patch in different poses + CvAffinePose* m_affine_poses; // an array of poses + CvMat** m_transforms; // an array of affine transforms corresponding to poses + + string m_feature_name; // the name of the feature associated with the descriptor + CvPoint m_center; // the coordinates of the feature (the center of the input image ROI) + + int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses + int m_pca_dim_low; // the number of pca components to use for comparison +}; + + +// OneWayDescriptorBase: encapsulates functionality for training/loading a set of one way descriptors +// and finding the nearest closest descriptor to an input feature +class CV_EXPORTS OneWayDescriptorBase +{ +public: + + // creates an instance of OneWayDescriptor from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of 
patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + + virtual ~OneWayDescriptorBase(); + void clear (); + + + // Allocate: allocates memory for a given number of descriptors + void Allocate(int train_feature_count); + + // AllocatePCADescriptors: allocates memory for pca descriptors + void AllocatePCADescriptors(); + + // returns patch size + CvSize GetPatchSize() const {return m_patch_size;}; + // returns the number of poses for each descriptor + int GetPoseCount() const {return m_pose_count;}; + + // returns the number of pyramid levels + int GetPyrLevels() const {return m_pyr_levels;}; + + // returns the number of descriptors + int GetDescriptorCount() const {return m_train_feature_count;}; + + // CreateDescriptorsFromImage: creates descriptors for each of the input features + // - src: input image + // - features: input features + // - pyr_levels: the number of pyramid levels + void CreateDescriptorsFromImage(IplImage* src, const vector& features); + + // CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors + void CreatePCADescriptors(); + + // returns a feature descriptor by feature index + const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];}; + + // FindDescriptor: finds the closest descriptor + // - patch: input image patch + // - desc_idx: output index of 
the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + // - _scales: scales of the input patch for each descriptor + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const; + + // - patch: input image patch + // - n: number of the closest indexes + // - desc_idxs: output indexes of the closest descriptor to the input patch (n) + // - pose_idx: output indexes of the closest pose of the closest descriptor to the input patch (n) + // - distances: distance from the input patch to the closest feature pose (n) + // - _scales: scales of the input patch + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int n, vector& desc_idxs, vector& pose_idxs, + vector& distances, vector& _scales, float* scale_ranges = 0) const; + + // FindDescriptor: finds the closest descriptor + // - src: input image + // - pt: center of the feature + // - desc_idx: output index of the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const; + + // InitializePoses: generates random poses + void InitializePoses(); + + // InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms) + void InitializeTransformsFromPoses(); + + // InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses + void InitializePoseTransforms(); + + // InitializeDescriptor: initializes a descriptor + // - desc_idx: descriptor index + // - train_image: image patch (ROI is 
supported) + // - feature_label: feature textual label + void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label); + + void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label); + + // InitializeDescriptors: load features from an image and create descriptors for each of them + void InitializeDescriptors(IplImage* train_image, const vector& features, + const char* feature_label = "", int desc_start_idx = 0); + + // Write: writes this object to a file storage + // - fs: output filestorage + void Write (FileStorage &fs) const; + + // Read: reads OneWayDescriptorBase object from a file node + // - fn: input file node + void Read (const FileNode &fn); + + // LoadPCADescriptors: loads PCA descriptors from a file + // - filename: input filename + int LoadPCADescriptors(const char* filename); + + // LoadPCADescriptors: loads PCA descriptors from a file node + // - fn: input file node + int LoadPCADescriptors(const FileNode &fn); + + // SavePCADescriptors: saves PCA descriptors to a file + // - filename: output filename + void SavePCADescriptors(const char* filename); + + // SavePCADescriptors: saves PCA descriptors to a file storage + // - fs: output file storage + void SavePCADescriptors(CvFileStorage* fs) const; + + // GeneratePCA: calculate and save PCA components and descriptors + // - img_path: path to training PCA images directory + // - images_list: filename with filenames of training PCA images + void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500); + + // SetPCAHigh: sets the high resolution pca matrices (copied to internal structures) + void SetPCAHigh(CvMat* avg, CvMat* eigenvectors); + + // SetPCALow: sets the low resolution pca matrices (copied to internal structures) + void SetPCALow(CvMat* avg, CvMat* eigenvectors); + + int GetLowPCA(CvMat** avg, CvMat** eigenvectors) + { + *avg = m_pca_avg; + *eigenvectors = m_pca_eigenvectors; + return 
m_pca_dim_low; + }; + + int GetPCADimLow() const {return m_pca_dim_low;}; + int GetPCADimHigh() const {return m_pca_dim_high;}; + + void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree + + // GetPCAFilename: get default PCA filename + static string GetPCAFilename () { return "pca.yml"; } + + virtual bool empty() const { return m_train_feature_count <= 0 ? true : false; } + +protected: + CvSize m_patch_size; // patch size + int m_pose_count; // the number of poses for each descriptor + int m_train_feature_count; // the number of the training features + OneWayDescriptor* m_descriptors; // array of train feature descriptors + CvMat* m_pca_avg; // PCA average Vector for small patches + CvMat* m_pca_eigenvectors; // PCA eigenvectors for small patches + CvMat* m_pca_hr_avg; // PCA average Vector for large patches + CvMat* m_pca_hr_eigenvectors; // PCA eigenvectors for large patches + OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors + + cv::flann::Index* m_pca_descriptors_tree; + CvMat* m_pca_descriptors_matrix; + + CvAffinePose* m_poses; // array of poses + CvMat** m_transforms; // array of affine transformations corresponding to poses + + int m_pca_dim_high; + int m_pca_dim_low; + + int m_pyr_levels; + float scale_min; + float scale_max; + float scale_step; + + // SavePCAall: saves PCA components and descriptors to a file storage + // - fs: output file storage + void SavePCAall (FileStorage &fs) const; + + // LoadPCAall: loads PCA components and descriptors from a file node + // - fn: input file node + void LoadPCAall (const FileNode &fn); +}; + +class CV_EXPORTS OneWayDescriptorObject : public OneWayDescriptorBase +{ +public: + // creates an instance of OneWayDescriptorObject from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains 
PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1); + + OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename, + const string &train_path = string (), const string &images_list = string (), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1); + + + virtual ~OneWayDescriptorObject(); + + // Allocate: allocates memory for a given number of features + // - train_feature_count: the total number of features + // - object_feature_count: the number of features extracted from the object + void Allocate(int train_feature_count, int object_feature_count); + + + void SetLabeledFeatures(const vector& features) {m_train_features = features;}; + vector& GetLabeledFeatures() {return m_train_features;}; + const vector& GetLabeledFeatures() const {return m_train_features;}; + vector _GetLabeledFeatures() const; + + // IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0 + int IsDescriptorObject(int desc_idx) const; + + // MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1 + int MatchPointToPart(CvPoint pt) const; + + // GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor + // - desc_idx: descriptor index + int GetDescriptorPart(int desc_idx) const; + + + void InitializeObjectDescriptors(IplImage* train_image, const vector& features, + const char* feature_label, int desc_start_idx = 0, float scale = 1.0f, + int is_background = 0); + + // GetObjectFeatureCount: returns the 
number of object features + int GetObjectFeatureCount() const {return m_object_feature_count;}; + +protected: + int* m_part_id; // contains part id for each of object descriptors + vector m_train_features; // train features + int m_object_feature_count; // the number of the positive features + +}; + + +/* + * OneWayDescriptorMatcher + */ +class OneWayDescriptorMatcher; +typedef OneWayDescriptorMatcher OneWayDescriptorMatch; + +class CV_EXPORTS OneWayDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + static const int POSE_COUNT = 500; + static const int PATCH_WIDTH = 24; + static const int PATCH_HEIGHT = 24; + static float GET_MIN_SCALE() { return 0.7f; } + static float GET_MAX_SCALE() { return 1.5f; } + static float GET_STEP_SCALE() { return 1.2f; } + + Params( int poseCount = POSE_COUNT, + Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT), + string pcaFilename = string(), + string trainPath = string(), string trainImagesList = string(), + float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(), + float stepScale = GET_STEP_SCALE() ); + + int poseCount; + Size patchSize; + string pcaFilename; + string trainPath; + string trainImagesList; + + float minScale, maxScale, stepScale; + }; + + OneWayDescriptorMatcher( const Params& params=Params() ); + virtual ~OneWayDescriptorMatcher(); + + void initialize( const Params& params, const Ptr& base=Ptr() ); + + // Clears keypoints storing in collection and OneWayDescriptorBase + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + // Matches a set of keypoints from a single image of the training set. 
A rectangle with a center in a keypoint + // and size (patch_width/2*scale, patch_height/2*scale) is cropped from the source image for each + // keypoint. scale is iterated from DescriptorOneWayParams::min_scale to DescriptorOneWayParams::max_scale. + // The minimum distance to each training patch with all its affine poses is found over all scales. + // The class ID of a match is returned for each keypoint. The distance is calculated over PCA components + // loaded with DescriptorOneWay::Initialize, kd tree is used for finding minimum distances. + virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + Ptr base; + Params params; + int prevTrainCount; +}; + +/* + * FernDescriptorMatcher + */ +class FernDescriptorMatcher; +typedef FernDescriptorMatcher FernDescriptorMatch; + +class CV_EXPORTS FernDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + Params( int nclasses=0, + int patchSize=FernClassifier::PATCH_SIZE, + int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE, + int nstructs=FernClassifier::DEFAULT_STRUCTS, + int structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int nviews=FernClassifier::DEFAULT_VIEWS, + int compressionMethod=FernClassifier::COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator() ); + + Params( const string& filename ); + + int nclasses; + int patchSize; + int signatureSize; + int nstructs; + int structSize; + int nviews; + int compressionMethod; + PatchGenerator patchGenerator; + + string filename; + }; + + FernDescriptorMatcher( const Params& params=Params() ); + virtual ~FernDescriptorMatcher(); + + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( 
const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + void trainFernClassifier(); + void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt, + float& bestProb, int& bestMatchIdx, vector& signature ); + Ptr classifier; + Params params; + int prevTrainCount; +}; + + +/* + * CalonderDescriptorExtractor + */ +template +class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor +{ +public: + CalonderDescriptorExtractor( const string& classifierFile ); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage &fs ) const; + + virtual int descriptorSize() const { return classifier_.classes(); } + virtual int descriptorType() const { return DataType::type; } + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + RTreeClassifier classifier_; + static const int BORDER_SIZE = 16; +}; + +template +CalonderDescriptorExtractor::CalonderDescriptorExtractor(const std::string& classifier_file) +{ + classifier_.read( classifier_file.c_str() ); +} + +template +void CalonderDescriptorExtractor::computeImpl( const Mat& image, + vector& keypoints, + Mat& descriptors) const +{ + // Cannot compute descriptors for keypoints on the image border. 
+ KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE); + + /// @todo Check 16-byte aligned + descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType::type); + + int patchSize = RandomizedTree::PATCH_SIZE; + int offset = patchSize / 2; + for (size_t i = 0; i < keypoints.size(); ++i) + { + cv::Point2f pt = keypoints[i].pt; + IplImage ipl = image( Rect((int)(pt.x - offset), (int)(pt.y - offset), patchSize, patchSize) ); + classifier_.getSignature( &ipl, descriptors.ptr((int)i)); + } +} + +template +void CalonderDescriptorExtractor::read( const FileNode& ) +{} + +template +void CalonderDescriptorExtractor::write( FileStorage& ) const +{} + +template +bool CalonderDescriptorExtractor::empty() const +{ + return classifier_.trees_.empty(); +} + + +////////////////////// Brute Force Matcher ////////////////////////// + +template +class CV_EXPORTS BruteForceMatcher : public BFMatcher +{ +public: + BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {(void)d;} + virtual ~BruteForceMatcher() {} +}; + + +/****************************************************************************************\ +* Planar Object Detection * +\****************************************************************************************/ + +class CV_EXPORTS PlanarObjectDetector +{ +public: + PlanarObjectDetector(); + PlanarObjectDetector(const FileNode& node); + PlanarObjectDetector(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~PlanarObjectDetector(); + virtual void train(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int 
_structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector& pyr, const vector& keypoints, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + Rect getModelROI() const; + vector getModelPoints() const; + const LDetector& getDetector() const; + const FernClassifier& getClassifier() const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector& corners) const; + bool operator()(const vector& pyr, const vector& keypoints, + CV_OUT Mat& H, CV_OUT vector& corners, + CV_OUT vector* pairs=0) const; + +protected: + bool verbose; + Rect modelROI; + vector modelPoints; + LDetector ldetector; + FernClassifier fernClassifier; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Splits color or grayscale image into multiple connected components + of nearly the same color/brightness using modification of Burt algorithm. 
+ comp with contain a pointer to sequence (CvSeq) + of connected components (CvConnectedComp) */ +CVAPI(void) cvPyrSegmentation( IplImage* src, IplImage* dst, + CvMemStorage* storage, CvSeq** comp, + int level, double threshold1, + double threshold2 ); + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +/* Initializes Delaunay triangulation */ +CVAPI(void) cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect ); + +/* Creates new subdivision */ +CVAPI(CvSubdiv2D*) cvCreateSubdiv2D( int subdiv_type, int header_size, + int vtx_size, int quadedge_size, + CvMemStorage* storage ); + +/************************* high-level subdivision functions ***************************/ + +/* Simplified Delaunay diagram creation */ +CV_INLINE CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage ) +{ + CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv), + sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage ); + + cvInitSubdivDelaunay2D( subdiv, rect ); + return subdiv; +} + + +/* Inserts new point to the Delaunay triangulation */ +CVAPI(CvSubdiv2DPoint*) cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt); + +/* Locates a point within the Delaunay triangulation (finds the edge + the point is left to or belongs to, or the triangulation point the given + point coinsides with */ +CVAPI(CvSubdiv2DPointLocation) cvSubdiv2DLocate( + CvSubdiv2D* subdiv, CvPoint2D32f pt, + CvSubdiv2DEdge* edge, + CvSubdiv2DPoint** vertex CV_DEFAULT(NULL) ); + +/* Calculates Voronoi tesselation (i.e. coordinates of Voronoi points) */ +CVAPI(void) cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Removes all Voronoi points from the tesselation */ +CVAPI(void) cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Finds the nearest to the given point vertex in subdivision. 
*/ +CVAPI(CvSubdiv2DPoint*) cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt ); + + +/************ Basic quad-edge navigation and operations ************/ + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge ) +{ + return CV_SUBDIV2D_NEXT_EDGE(edge); +} + + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate ) +{ + return (edge & ~3) + ((edge + rotate) & 3); +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DSymEdge( CvSubdiv2DEdge edge ) +{ + return edge ^ 2; +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + edge = e->next[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + +/****************************************************************************************\ +* Additional operations on Subdivisions * +\****************************************************************************************/ + +// paints voronoi diagram: just demo function +CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst ); + +// checks planar subdivision for correctness. It is not an absolute check, +// but it verifies some relations between quad-edges +CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv ); + +// returns squared distance between two 2D points with floating-point coordinates. 
+CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 ) +{ + double dx = pt1.x - pt2.x; + double dy = pt1.y - pt2.y; + + return dx*dx + dy*dy; +} + + + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. */ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. 
*/ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + +/* Kolmogorov-Zabin stereo-correspondence algorithm (a.k.a. KZ1) */ +#define CV_STEREO_GC_OCCLUDED SHRT_MAX + +typedef struct CvStereoGCState +{ + int Ithreshold; + int interactionRadius; + float K, lambda, lambda1, lambda2; + int occlusionCost; + int minDisparity; + int numberOfDisparities; + int maxIters; + + CvMat* left; + CvMat* right; + CvMat* dispLeft; + CvMat* dispRight; + CvMat* ptrLeft; + CvMat* ptrRight; + CvMat* vtxBuf; + CvMat* edgeBuf; +} CvStereoGCState; + +CVAPI(CvStereoGCState*) cvCreateStereoGCState( int numberOfDisparities, int maxIters ); +CVAPI(void) cvReleaseStereoGCState( CvStereoGCState** state ); + +CVAPI(void) cvFindStereoCorrespondenceGC( const CvArr* left, const CvArr* right, + CvArr* disparityLeft, CvArr* disparityRight, + CvStereoGCState* state, + int useDisparityGuess CV_DEFAULT(0) ); + +/* Calculates optical flow for 2 images using classical Lucas & Kanade algorithm */ +CVAPI(void) cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, + CvSize win_size, CvArr* velx, CvArr* vely ); + +/* Calculates optical flow for 2 images using block matching algorithm */ +CVAPI(void) cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, + CvSize block_size, CvSize shift_size, + CvSize max_range, int use_previous, + CvArr* velx, CvArr* 
vely ); + +/* Calculates Optical flow for 2 images using Horn & Schunck algorithm */ +CVAPI(void) cvCalcOpticalFlowHS( const CvArr* prev, const CvArr* curr, + int use_previous, CvArr* velx, CvArr* vely, + double lambda, CvTermCriteria criteria ); + + +/****************************************************************************************\ +* Background/foreground segmentation * +\****************************************************************************************/ + +/* We discriminate between foreground and background pixels + * by building and maintaining a model of the background. + * Any pixel which does not fit this model is then deemed + * to be foreground. + * + * At present we support two core background models, + * one of which has two variations: + * + * o CV_BG_MODEL_FGD: latest and greatest algorithm, described in + * + * Foreground Object Detection from Videos Containing Complex Background. + * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. + * ACM MM2003 9p + * + * o CV_BG_MODEL_FGD_SIMPLE: + * A code comment describes this as a simplified version of the above, + * but the code is in fact currently identical + * + * o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in + * + * Moving target classification and tracking from real-time video. + * A Lipton, H Fujijoshi, R Patil + * Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998 + * + * Learning patterns of activity using real-time tracking + * C Stauffer and W Grimson August 2000 + * IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757 + */ + + +#define CV_BG_MODEL_FGD 0 +#define CV_BG_MODEL_MOG 1 /* "Mixture of Gaussians". 
*/ +#define CV_BG_MODEL_FGD_SIMPLE 2 + +struct CvBGStatModel; + +typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model ); +typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model, + double learningRate ); + +#define CV_BG_STAT_MODEL_FIELDS() \ +int type; /*type of BG model*/ \ +CvReleaseBGStatModel release; \ +CvUpdateBGStatModel update; \ +IplImage* background; /*8UC3 reference background image*/ \ +IplImage* foreground; /*8UC1 foreground image*/ \ +IplImage** layers; /*8UC3 reference background image, can be null */ \ +int layer_count; /* can be zero */ \ +CvMemStorage* storage; /*storage for foreground_regions*/ \ +CvSeq* foreground_regions /*foreground object contours*/ + +typedef struct CvBGStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); +} CvBGStatModel; + +// + +// Releases memory used by BGStatModel +CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model ); + +// Updates statistical model and returns number of found foreground regions +CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model, + double learningRate CV_DEFAULT(-1)); + +// Performs FG post-processing using segmentation +// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG). 
+// parameters: +// segments - pointer to result of segmentation (for example MeanShiftSegmentation) +// bg_model - pointer to CvBGStatModel structure +CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel* bg_model ); + +/* Common use change detection function */ +CVAPI(int) cvChangeDetection( IplImage* prev_frame, + IplImage* curr_frame, + IplImage* change_mask ); + +/* + Interface of ACM MM2003 algorithm + */ + +/* Default parameters of foreground detection algorithm: */ +#define CV_BGFG_FGD_LC 128 +#define CV_BGFG_FGD_N1C 15 +#define CV_BGFG_FGD_N2C 25 + +#define CV_BGFG_FGD_LCC 64 +#define CV_BGFG_FGD_N1CC 25 +#define CV_BGFG_FGD_N2CC 40 + +/* Background reference image update parameter: */ +#define CV_BGFG_FGD_ALPHA_1 0.1f + +/* stat model update parameter + * 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG) + */ +#define CV_BGFG_FGD_ALPHA_2 0.005f + +/* start value for alpha parameter (to fast initiate statistic model) */ +#define CV_BGFG_FGD_ALPHA_3 0.1f + +#define CV_BGFG_FGD_DELTA 2 + +#define CV_BGFG_FGD_T 0.9f + +#define CV_BGFG_FGD_MINAREA 15.f + +#define CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f + +/* See the above-referenced Li/Huang/Gu/Tian paper + * for a full description of these background-model + * tuning parameters. + * + * Nomenclature: 'c' == "color", a three-component red/green/blue vector. + * We use histograms of these to model the range of + * colors we've seen at a given background pixel. + * + * 'cc' == "color co-occurrence", a six-component vector giving + * RGB color for both this frame and preceding frame. + * We use histograms of these to model the range of + * color CHANGES we've seen at a given background pixel. + */ +typedef struct CvFGDStatModelParams +{ + int Lc; /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. */ + int N1c; /* Number of color vectors used to model normal background color variation at a given pixel. 
*/ + int N2c; /* Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. */ + /* Used to allow the first N1c vectors to adapt over time to changing background. */ + + int Lcc; /* Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. */ + int N1cc; /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */ + int N2cc; /* Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. */ + /* Used to allow the first N1cc vectors to adapt over time to changing background. */ + + int is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */ + int perform_morphing; /* Number of erode-dilate-erode foreground-blob cleanup iterations. */ + /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */ + + float alpha1; /* How quickly we forget old background pixel values seen. Typically set to 0.1 */ + float alpha2; /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */ + float alpha3; /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */ + + float delta; /* Affects color and color co-occurrence quantization, typically set to 2. */ + float T; /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/ + float minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. 
*/ +} CvFGDStatModelParams; + +typedef struct CvBGPixelCStatTable +{ + float Pv, Pvb; + uchar v[3]; +} CvBGPixelCStatTable; + +typedef struct CvBGPixelCCStatTable +{ + float Pv, Pvb; + uchar v[6]; +} CvBGPixelCCStatTable; + +typedef struct CvBGPixelStat +{ + float Pbc; + float Pbcc; + CvBGPixelCStatTable* ctable; + CvBGPixelCCStatTable* cctable; + uchar is_trained_st_model; + uchar is_trained_dyn_model; +} CvBGPixelStat; + + +typedef struct CvFGDStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvBGPixelStat* pixel_stat; + IplImage* Ftd; + IplImage* Fbd; + IplImage* prev_frame; + CvFGDStatModelParams params; +} CvFGDStatModel; + +/* Creates FGD model */ +CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame, + CvFGDStatModelParams* parameters CV_DEFAULT(NULL)); + +/* + Interface of Gaussian mixture algorithm + + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001." 
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf + */ + +/* Note: "MOG" == "Mixture Of Gaussians": */ + +#define CV_BGFG_MOG_MAX_NGAUSSIANS 500 + +/* default parameters of gaussian background detection algorithm */ +#define CV_BGFG_MOG_BACKGROUND_THRESHOLD 0.7 /* threshold sum of weights for background test */ +#define CV_BGFG_MOG_STD_THRESHOLD 2.5 /* lambda=2.5 is 99% */ +#define CV_BGFG_MOG_WINDOW_SIZE 200 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */ +#define CV_BGFG_MOG_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */ +#define CV_BGFG_MOG_WEIGHT_INIT 0.05 +#define CV_BGFG_MOG_SIGMA_INIT 30 +#define CV_BGFG_MOG_MINAREA 15.f + + +#define CV_BGFG_MOG_NCOLORS 3 + +typedef struct CvGaussBGStatModelParams +{ + int win_size; /* = 1/alpha */ + int n_gauss; + double bg_threshold, std_threshold, minArea; + double weight_init, variance_init; +}CvGaussBGStatModelParams; + +typedef struct CvGaussBGValues +{ + int match_sum; + double weight; + double variance[CV_BGFG_MOG_NCOLORS]; + double mean[CV_BGFG_MOG_NCOLORS]; +} CvGaussBGValues; + +typedef struct CvGaussBGPoint +{ + CvGaussBGValues* g_values; +} CvGaussBGPoint; + + +typedef struct CvGaussBGModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvGaussBGStatModelParams params; + CvGaussBGPoint* g_point; + int countFrames; + void* mog; +} CvGaussBGModel; + + +/* Creates Gaussian mixture background model */ +CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame, + CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL)); + + +typedef struct CvBGCodeBookElem +{ + struct CvBGCodeBookElem* next; + int tLastUpdate; + int stale; + uchar boxMin[3]; + uchar boxMax[3]; + uchar learnMin[3]; + uchar learnMax[3]; +} CvBGCodeBookElem; + +typedef struct CvBGCodeBookModel +{ + CvSize size; + int t; + uchar cbBounds[3]; + uchar modMin[3]; + uchar modMax[3]; + CvBGCodeBookElem** cbmap; + CvMemStorage* storage; + CvBGCodeBookElem* freeList; +} CvBGCodeBookModel; + +CVAPI(CvBGCodeBookModel*) 
cvCreateBGCodeBookModel( void ); +CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model ); + +CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image, + CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) ); + +CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1), + float perimScale CV_DEFAULT(4.f), + CvMemStorage* storage CV_DEFAULT(0), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +#ifdef __cplusplus +} +#endif + +#endif + +/* End of file. */ diff --git a/OpenCV/Headers/legacy/streams.hpp b/OpenCV/Headers/legacy/streams.hpp new file mode 100644 index 0000000000..6935b00c3d --- /dev/null +++ b/OpenCV/Headers/legacy/streams.hpp @@ -0,0 +1,93 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CVSTREAMS_H__ +#define __OPENCV_CVSTREAMS_H__ + +#ifdef WIN32 +#include /* !!! IF YOU'VE GOT AN ERROR HERE, PLEASE READ BELOW !!! */ +/***************** How to get Visual Studio understand streams.h ****************\ + +You need DirectShow SDK that is now a part of Platform SDK +(Windows Server 2003 SP1 SDK or later), +and DirectX SDK (2006 April or later). + +1. Download the Platform SDK from + http://www.microsoft.com/msdownload/platformsdk/sdkupdate/ + and DirectX SDK from msdn.microsoft.com/directx/ + (They are huge, but you can download it by parts). + If it doesn't work for you, consider HighGUI that can capture video via VFW or MIL + +2. Install Platform SDK together with DirectShow SDK. + Install DirectX (with or without sample code). + +3. Build baseclasses. 
+ See \samples\multimedia\directshow\readme.txt. + +4. Copy the built libraries (called strmbase.lib and strmbasd.lib + in Release and Debug versions, respectively) to + \lib. + +5. In Developer Studio add the following paths: + \include + \include + \samples\multimedia\directshow\baseclasses + to the includes' search path + (at Tools->Options->Directories->Include files in case of Visual Studio 6.0, + at Tools->Options->Projects and Solutions->VC++ Directories->Include files in case + of Visual Studio 2005) + Add + \lib + \lib + to the libraries' search path (in the same dialog, ...->"Library files" page) + + NOTE: PUT THE ADDED LINES ON THE VERY TOP OF THE LISTS, OTHERWISE YOU MAY STILL GET + COMPILER OR LINKER ERRORS. This is necessary, because Visual Studio + may include older versions of the same headers and libraries. + +6. Now you can build OpenCV DirectShow filters. + +\***********************************************************************************/ + +#endif + +#endif + diff --git a/OpenCV/Headers/ml/ml.hpp b/OpenCV/Headers/ml/ml.hpp new file mode 100644 index 0000000000..32047608e0 --- /dev/null +++ b/OpenCV/Headers/ml/ml.hpp @@ -0,0 +1,2133 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_ML_HPP__ +#define __OPENCV_ML_HPP__ + +#include "opencv2/core/core.hpp" +#include + +#ifdef __cplusplus + +#include +#include +#include + +// Apple defines a check() macro somewhere in the debug headers +// that interferes with a method definiton in this header +#undef check + +/****************************************************************************************\ +* Main struct definitions * +\****************************************************************************************/ + +/* log(2*PI) */ +#define CV_LOG2PI (1.8378770664093454835606594728112) + +/* columns of matrix are training samples */ +#define CV_COL_SAMPLE 0 + +/* rows of matrix are training samples */ +#define CV_ROW_SAMPLE 1 + +#define CV_IS_ROW_SAMPLE(flags) ((flags) & CV_ROW_SAMPLE) + +struct CvVectors +{ + int type; + int dims, count; + CvVectors* next; + union + { + uchar** ptr; + float** fl; + double** db; + } data; +}; + +#if 0 +/* A structure, representing the lattice range of statmodel parameters. + It is used for optimizing statmodel parameters by cross-validation method. + The lattice is logarithmic, so must be greater then 1. */ +typedef struct CvParamLattice +{ + double min_val; + double max_val; + double step; +} +CvParamLattice; + +CV_INLINE CvParamLattice cvParamLattice( double min_val, double max_val, + double log_step ) +{ + CvParamLattice pl; + pl.min_val = MIN( min_val, max_val ); + pl.max_val = MAX( min_val, max_val ); + pl.step = MAX( log_step, 1. 
); + return pl; +} + +CV_INLINE CvParamLattice cvDefaultParamLattice( void ) +{ + CvParamLattice pl = {0,0,0}; + return pl; +} +#endif + +/* Variable type */ +#define CV_VAR_NUMERICAL 0 +#define CV_VAR_ORDERED 0 +#define CV_VAR_CATEGORICAL 1 + +#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm" +#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn" +#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian" +#define CV_TYPE_NAME_ML_EM "opencv-ml-em" +#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree" +#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree" +#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp" +#define CV_TYPE_NAME_ML_CNN "opencv-ml-cnn" +#define CV_TYPE_NAME_ML_RTREES "opencv-ml-random-trees" +#define CV_TYPE_NAME_ML_ERTREES "opencv-ml-extremely-randomized-trees" +#define CV_TYPE_NAME_ML_GBT "opencv-ml-gradient-boosting-trees" + +#define CV_TRAIN_ERROR 0 +#define CV_TEST_ERROR 1 + +class CV_EXPORTS_W CvStatModel +{ +public: + CvStatModel(); + virtual ~CvStatModel(); + + virtual void clear(); + + CV_WRAP virtual void save( const char* filename, const char* name=0 ) const; + CV_WRAP virtual void load( const char* filename, const char* name=0 ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + const char* default_model_name; +}; + +/****************************************************************************************\ +* Normal Bayes Classifier * +\****************************************************************************************/ + +/* The structure, representing the grid range of statmodel parameters. + It is used for optimizing statmodel accuracy by varying model parameters, + the accuracy estimate being computed by cross-validation. + The grid is logarithmic, so must be greater then 1. 
*/ + +class CvMLData; + +struct CV_EXPORTS_W_MAP CvParamGrid +{ + // SVM params type + enum { SVM_C=0, SVM_GAMMA=1, SVM_P=2, SVM_NU=3, SVM_COEF=4, SVM_DEGREE=5 }; + + CvParamGrid() + { + min_val = max_val = step = 0; + } + + CvParamGrid( double min_val, double max_val, double log_step ); + //CvParamGrid( int param_id ); + bool check() const; + + CV_PROP_RW double min_val; + CV_PROP_RW double max_val; + CV_PROP_RW double step; +}; + +inline CvParamGrid::CvParamGrid( double _min_val, double _max_val, double _log_step ) +{ + min_val = _min_val; + max_val = _max_val; + step = _log_step; +} + +class CV_EXPORTS_W CvNormalBayesClassifier : public CvStatModel +{ +public: + CV_WRAP CvNormalBayesClassifier(); + virtual ~CvNormalBayesClassifier(); + + CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false ); + + virtual float predict( const CvMat* samples, CV_OUT CvMat* results=0 ) const; + CV_WRAP virtual void clear(); + + CV_WRAP CvNormalBayesClassifier( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat() ); + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx = cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + bool update=false ); + CV_WRAP virtual float predict( const cv::Mat& samples, CV_OUT cv::Mat* results=0 ) const; + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + int var_count, var_all; + CvMat* var_idx; + CvMat* cls_labels; + CvMat** count; + CvMat** sum; + CvMat** productsum; + CvMat** avg; + CvMat** inv_eigen_values; + CvMat** cov_rotate_mats; + CvMat* c; +}; + + 
+/****************************************************************************************\ +* K-Nearest Neighbour Classifier * +\****************************************************************************************/ + +// k Nearest Neighbors +class CV_EXPORTS_W CvKNearest : public CvStatModel +{ +public: + + CV_WRAP CvKNearest(); + virtual ~CvKNearest(); + + CvKNearest( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool isRegression=false, int max_k=32 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool is_regression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const CvMat* samples, int k, CV_OUT CvMat* results=0, + const float** neighbors=0, CV_OUT CvMat* neighborResponses=0, CV_OUT CvMat* dist=0 ) const; + + CV_WRAP CvKNearest( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, int max_k=32 ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const cv::Mat& samples, int k, cv::Mat* results=0, + const float** neighbors=0, cv::Mat* neighborResponses=0, + cv::Mat* dist=0 ) const; + CV_WRAP virtual float find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results, + CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const; + + virtual void clear(); + int get_max_k() const; + int get_var_count() const; + int get_sample_count() const; + bool is_regression() const; + + virtual float write_results( int k, int k1, int start, int end, + const float* neighbor_responses, const float* dist, CvMat* _results, + CvMat* _neighbor_responses, CvMat* _dist, Cv32suf* sort_buf ) const; + + virtual void find_neighbors_direct( const CvMat* _samples, int k, int start, int end, + float* neighbor_responses, const float** 
neighbors, float* dist ) const; + +protected: + + int max_k, var_count; + int total; + bool regression; + CvVectors* samples; +}; + +/****************************************************************************************\ +* Support Vector Machines * +\****************************************************************************************/ + +// SVM training parameters +struct CV_EXPORTS_W_MAP CvSVMParams +{ + CvSVMParams(); + CvSVMParams( int svm_type, int kernel_type, + double degree, double gamma, double coef0, + double Cvalue, double nu, double p, + CvMat* class_weights, CvTermCriteria term_crit ); + + CV_PROP_RW int svm_type; + CV_PROP_RW int kernel_type; + CV_PROP_RW double degree; // for poly + CV_PROP_RW double gamma; // for poly/rbf/sigmoid + CV_PROP_RW double coef0; // for poly/sigmoid + + CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR + CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR + CV_PROP_RW double p; // for CV_SVM_EPS_SVR + CvMat* class_weights; // for CV_SVM_C_SVC + CV_PROP_RW CvTermCriteria term_crit; // termination criteria +}; + + +struct CV_EXPORTS CvSVMKernel +{ + typedef void (CvSVMKernel::*Calc)( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + CvSVMKernel(); + CvSVMKernel( const CvSVMParams* params, Calc _calc_func ); + virtual bool create( const CvSVMParams* params, Calc _calc_func ); + virtual ~CvSVMKernel(); + + virtual void clear(); + virtual void calc( int vcount, int n, const float** vecs, const float* another, float* results ); + + const CvSVMParams* params; + Calc calc_func; + + virtual void calc_non_rbf_base( int vec_count, int vec_size, const float** vecs, + const float* another, float* results, + double alpha, double beta ); + + virtual void calc_linear( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_rbf( int vec_count, int vec_size, const float** vecs, + const 
float* another, float* results ); + virtual void calc_poly( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_sigmoid( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); +}; + + +struct CvSVMKernelRow +{ + CvSVMKernelRow* prev; + CvSVMKernelRow* next; + float* data; +}; + + +struct CvSVMSolutionInfo +{ + double obj; + double rho; + double upper_bound_p; + double upper_bound_n; + double r; // for Solver_NU +}; + +class CV_EXPORTS CvSVMSolver +{ +public: + typedef bool (CvSVMSolver::*SelectWorkingSet)( int& i, int& j ); + typedef float* (CvSVMSolver::*GetRow)( int i, float* row, float* dst, bool existed ); + typedef void (CvSVMSolver::*CalcRho)( double& rho, double& r ); + + CvSVMSolver(); + + CvSVMSolver( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual bool create( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual ~CvSVMSolver(); + + virtual void clear(); + virtual bool solve_generic( CvSVMSolutionInfo& si ); + + virtual bool solve_c_svc( int count, int var_count, const float** samples, schar* y, + double Cp, double Cn, CvMemStorage* storage, + CvSVMKernel* kernel, double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_nu_svc( int count, int var_count, const float** samples, schar* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_one_class( int count, int var_count, const float** samples, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_eps_svr( int 
count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_nu_svr( int count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual float* get_row_base( int i, bool* _existed ); + virtual float* get_row( int i, float* dst ); + + int sample_count; + int var_count; + int cache_size; + int cache_line_size; + const float** samples; + const CvSVMParams* params; + CvMemStorage* storage; + CvSVMKernelRow lru_list; + CvSVMKernelRow* rows; + + int alpha_count; + + double* G; + double* alpha; + + // -1 - lower bound, 0 - free, 1 - upper bound + schar* alpha_status; + + schar* y; + double* b; + float* buf[2]; + double eps; + int max_iter; + double C[2]; // C[0] == Cn, C[1] == Cp + CvSVMKernel* kernel; + + SelectWorkingSet select_working_set_func; + CalcRho calc_rho_func; + GetRow get_row_func; + + virtual bool select_working_set( int& i, int& j ); + virtual bool select_working_set_nu_svm( int& i, int& j ); + virtual void calc_rho( double& rho, double& r ); + virtual void calc_rho_nu_svm( double& rho, double& r ); + + virtual float* get_row_svc( int i, float* row, float* dst, bool existed ); + virtual float* get_row_one_class( int i, float* row, float* dst, bool existed ); + virtual float* get_row_svr( int i, float* row, float* dst, bool existed ); +}; + + +struct CvSVMDecisionFunc +{ + double rho; + int sv_count; + double* alpha; + int* sv_index; +}; + + +// SVM model +class CV_EXPORTS_W CvSVM : public CvStatModel +{ +public: + // SVM type + enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 }; + + // SVM kernel type + enum { LINEAR=0, POLY=1, RBF=2, SIGMOID=3 }; + + // SVM params type + enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 }; + + CV_WRAP CvSVM(); + virtual ~CvSVM(); + + CvSVM( const CvMat* trainData, const CvMat* responses, + const 
CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train_auto( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params, + int kfold = 10, + CvParamGrid Cgrid = get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = get_default_grid(CvSVM::P), + CvParamGrid nuGrid = get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = get_default_grid(CvSVM::DEGREE), + bool balanced=false ); + + virtual float predict( const CvMat* sample, bool returnDFVal=false ) const; + virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const; + + CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train_auto( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx, const cv::Mat& sampleIdx, CvSVMParams params, + int k_fold = 10, + CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), + CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), + bool balanced=false); + CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const; + CV_WRAP_AS(predict_all) void predict( 
cv::InputArray samples, cv::OutputArray results ) const; + + CV_WRAP virtual int get_support_vector_count() const; + virtual const float* get_support_vector(int i) const; + virtual CvSVMParams get_params() const { return params; }; + CV_WRAP virtual void clear(); + + static CvParamGrid get_default_grid( int param_id ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + CV_WRAP int get_var_count() const { return var_idx ? var_idx->cols : var_all; } + +protected: + + virtual bool set_params( const CvSVMParams& params ); + virtual bool train1( int sample_count, int var_count, const float** samples, + const void* responses, double Cp, double Cn, + CvMemStorage* _storage, double* alpha, double& rho ); + virtual bool do_train( int svm_type, int sample_count, int var_count, const float** samples, + const CvMat* responses, CvMemStorage* _storage, double* alpha ); + virtual void create_kernel(); + virtual void create_solver(); + + virtual float predict( const float* row_sample, int row_len, bool returnDFVal=false ) const; + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvSVMParams params; + CvMat* class_labels; + int var_all; + float** sv; + int sv_total; + CvMat* var_idx; + CvMat* class_weights; + CvSVMDecisionFunc* decision_func; + CvMemStorage* storage; + + CvSVMSolver* solver; + CvSVMKernel* kernel; +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +namespace cv +{ +class CV_EXPORTS_W EM : public Algorithm +{ +public: + // Type of covariation matrices + enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL}; + + // Default parameters + enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; + + 
// The initial step + enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; + + CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, + const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, + EM::DEFAULT_MAX_ITERS, FLT_EPSILON)); + + virtual ~EM(); + CV_WRAP virtual void clear(); + + CV_WRAP virtual bool train(InputArray samples, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainE(InputArray samples, + InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainM(InputArray samples, + InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP Vec2d predict(InputArray sample, + OutputArray probs=noArray()) const; + + CV_WRAP bool isTrained() const; + + AlgorithmInfo* info() const; + virtual void read(const FileNode& fn); + +protected: + + virtual void setTrainData(int startStep, const Mat& samples, + const Mat* probs0, + const Mat* means0, + const vector* covs0, + const Mat* weights0); + + bool doTrain(int startStep, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs); + virtual void eStep(); + virtual void mStep(); + + void clusterTrainSamples(); + void decomposeCovs(); + void computeLogWeightDivDet(); + + Vec2d computeProbabilities(const Mat& sample, Mat* probs) const; + + // all inner matrices have type CV_64FC1 + CV_PROP_RW int nclusters; + CV_PROP_RW int covMatType; + CV_PROP_RW int maxIters; + CV_PROP_RW double epsilon; + + Mat trainSamples; + Mat trainProbs; + Mat trainLogLikelihoods; + Mat trainLabels; + + CV_PROP Mat weights; + CV_PROP Mat means; + CV_PROP vector covs; + + vector covsEigenValues; + vector covsRotateMats; + vector invCovsEigenValues; + Mat 
logWeightDivDet; +}; +} // namespace cv + +/****************************************************************************************\ +* Decision Tree * +\****************************************************************************************/\ +struct CvPair16u32s +{ + unsigned short* u; + int* i; +}; + + +#define CV_DTREE_CAT_DIR(idx,subset) \ + (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1) + +struct CvDTreeSplit +{ + int var_idx; + int condensed_idx; + int inversed; + float quality; + CvDTreeSplit* next; + union + { + int subset[2]; + struct + { + float c; + int split_point; + } + ord; + }; +}; + +struct CvDTreeNode +{ + int class_idx; + int Tn; + double value; + + CvDTreeNode* parent; + CvDTreeNode* left; + CvDTreeNode* right; + + CvDTreeSplit* split; + + int sample_count; + int depth; + int* num_valid; + int offset; + int buf_idx; + double maxlr; + + // global pruning data + int complexity; + double alpha; + double node_risk, tree_risk, tree_error; + + // cross-validation pruning data + int* cv_Tn; + double* cv_node_risk; + double* cv_node_error; + + int get_num_valid(int vi) { return num_valid ? 
num_valid[vi] : sample_count; } + void set_num_valid(int vi, int n) { if( num_valid ) num_valid[vi] = n; } +}; + + +struct CV_EXPORTS_W_MAP CvDTreeParams +{ + CV_PROP_RW int max_categories; + CV_PROP_RW int max_depth; + CV_PROP_RW int min_sample_count; + CV_PROP_RW int cv_folds; + CV_PROP_RW bool use_surrogates; + CV_PROP_RW bool use_1se_rule; + CV_PROP_RW bool truncate_pruned_tree; + CV_PROP_RW float regression_accuracy; + const float* priors; + + CvDTreeParams(); + CvDTreeParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, int cv_folds, + bool use_1se_rule, bool truncate_pruned_tree, + const float* priors ); +}; + + +struct CV_EXPORTS CvDTreeTrainData +{ + CvDTreeTrainData(); + CvDTreeTrainData( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false ); + virtual ~CvDTreeTrainData(); + + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void do_responses_copy(); + + virtual void get_vectors( const CvMat* _subsample_idx, + float* values, uchar* missing, float* responses, bool get_class_idx=false ); + + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + // release all the data + virtual void clear(); + + int get_num_classes() const; + int get_var_type(int vi) const; + int get_work_var_count() const {return work_var_count;} + + virtual const float* get_ord_responses( CvDTreeNode* n, 
float* values_buf, int* sample_indices_buf ); + virtual const int* get_class_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* sorted_indices_buf, + const float** ord_values, const int** sorted_indices, int* sample_indices_buf ); + virtual int get_child_buf_idx( CvDTreeNode* n ); + + //////////////////////////////////// + + virtual bool set_params( const CvDTreeParams& params ); + virtual CvDTreeNode* new_node( CvDTreeNode* parent, int count, + int storage_idx, int offset ); + + virtual CvDTreeSplit* new_split_ord( int vi, float cmp_val, + int split_point, int inversed, float quality ); + virtual CvDTreeSplit* new_split_cat( int vi, float quality ); + virtual void free_node_data( CvDTreeNode* node ); + virtual void free_train_data(); + virtual void free_node( CvDTreeNode* node ); + + int sample_count, var_all, var_count, max_c_count; + int ord_var_count, cat_var_count, work_var_count; + bool have_labels, have_priors; + bool is_classifier; + int tflag; + + const CvMat* train_data; + const CvMat* responses; + CvMat* responses_copy; // used in Boosting + + int buf_count, buf_size; + bool shared; + int is_buf_16u; + + CvMat* cat_count; + CvMat* cat_ofs; + CvMat* cat_map; + + CvMat* counts; + CvMat* buf; + CvMat* direction; + CvMat* split_buf; + + CvMat* var_idx; + CvMat* var_type; // i-th element = + // k<0 - ordered + // k>=0 - categorical, see k-th element of cat_* arrays + CvMat* priors; + CvMat* priors_mult; + + CvDTreeParams params; + + CvMemStorage* tree_storage; + CvMemStorage* temp_storage; + + CvDTreeNode* data_root; + + CvSet* node_heap; + CvSet* split_heap; + CvSet* cv_heap; + CvSet* nv_heap; + + cv::RNG* rng; +}; + +class CvDTree; +class CvForestTree; 
+ +namespace cv +{ + struct DTreeBestSplitFinder; + struct ForestTreeBestSplitFinder; +} + +class CV_EXPORTS_W CvDTree : public CvStatModel +{ +public: + CV_WRAP CvDTree(); + virtual ~CvDTree(); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvMLData* trainData, CvDTreeParams params=CvDTreeParams() ); + + // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + virtual float calc_error( CvMLData* trainData, int type, std::vector *resp = 0 ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* subsampleIdx ); + + virtual CvDTreeNode* predict( const CvMat* sample, const CvMat* missingDataMask=0, + bool preprocessedInput=false ) const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvDTreeParams params=CvDTreeParams() ); + + CV_WRAP virtual CvDTreeNode* predict( const cv::Mat& sample, const cv::Mat& missingDataMask=cv::Mat(), + bool preprocessedInput=false ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + virtual const CvMat* get_var_importance(); + CV_WRAP virtual void clear(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + // special read & write methods for trees in the tree ensembles + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + virtual void write( CvFileStorage* fs ) const; + + const CvDTreeNode* get_root() const; + int get_pruned_tree_idx() const; + CvDTreeTrainData* get_data(); + +protected: + friend struct cv::DTreeBestSplitFinder; + + virtual bool do_train( const CvMat* _subsample_idx ); + + virtual void 
try_split_node( CvDTreeNode* n ); + virtual void split_node_data( CvDTreeNode* n ); + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual double calc_node_dir( CvDTreeNode* node ); + virtual void complete_node_dir( CvDTreeNode* node ); + virtual void cluster_categories( const int* vectors, int vector_count, + int var_count, int* sums, int k, int* cluster_labels ); + + virtual void calc_node_value( CvDTreeNode* node ); + + virtual void prune_cv(); + virtual double update_tree_rnc( int T, int fold ); + virtual int cut_tree( int T, int fold, double min_alpha ); + virtual void free_prune_data(bool cut_tree); + virtual void free_tree(); + + virtual void write_node( CvFileStorage* fs, CvDTreeNode* node ) const; + virtual void write_split( CvFileStorage* fs, CvDTreeSplit* split ) const; + virtual CvDTreeNode* read_node( CvFileStorage* fs, CvFileNode* node, CvDTreeNode* parent ); + virtual CvDTreeSplit* read_split( CvFileStorage* fs, CvFileNode* node ); + virtual void write_tree_nodes( CvFileStorage* fs ) const; + virtual void read_tree_nodes( CvFileStorage* fs, CvFileNode* node ); + + CvDTreeNode* root; + CvMat* var_importance; + CvDTreeTrainData* data; + +public: + int pruned_tree_idx; +}; + + 
+/****************************************************************************************\ +* Random Trees Classifier * +\****************************************************************************************/ + +class CvRTrees; + +class CV_EXPORTS CvForestTree: public CvDTree +{ +public: + CvForestTree(); + virtual ~CvForestTree(); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx, CvRTrees* forest ); + + virtual int get_var_count() const {return data ? data->var_count : 0;} + virtual void read( CvFileStorage* fs, CvFileNode* node, CvRTrees* forest, CvDTreeTrainData* _data ); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + friend struct cv::ForestTreeBestSplitFinder; + + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + CvRTrees* forest; +}; + + +struct CV_EXPORTS_W_MAP CvRTParams : public CvDTreeParams +{ + //Parameters for the forest + CV_PROP_RW bool calc_var_importance; // true <=> RF processes variable importance + CV_PROP_RW int nactive_vars; + CV_PROP_RW CvTermCriteria term_crit; + + CvRTParams(); + CvRTParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, const float* priors, bool calc_var_importance, + int nactive_vars, int max_num_of_trees_in_the_forest, + float forest_accuracy, int termcrit_type ); +}; + + +class CV_EXPORTS_W CvRTrees : public CvStatModel +{ +public: + CV_WRAP CvRTrees(); + virtual ~CvRTrees(); + virtual bool train( 
const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams() ); + + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); + virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) const; + virtual float predict_prob( const CvMat* sample, const CvMat* missing = 0 ) const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams() ); + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual float predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + CV_WRAP virtual void clear(); + + virtual const CvMat* get_var_importance(); + virtual float get_proximity( const CvMat* sample1, const CvMat* sample2, + const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const; + + virtual float calc_error( CvMLData* data, int type , std::vector* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + virtual float get_train_error(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + CvMat* get_active_var_mask(); + CvRNG* get_rng(); + + int get_tree_count() const; + CvForestTree* get_tree(int i) const; + +protected: + virtual std::string getName() const; + + virtual bool grow_forest( const CvTermCriteria term_crit ); + + // array of the trees of the forest + CvForestTree** trees; + CvDTreeTrainData* data; + int ntrees; + int nclasses; + double oob_error; + CvMat* var_importance; + int nsamples; + + cv::RNG* rng; + CvMat* active_var_mask; +}; + 
+/****************************************************************************************\ +* Extremely randomized trees Classifier * +\****************************************************************************************/ +struct CV_EXPORTS CvERTreeTrainData : public CvDTreeTrainData +{ + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* missing_buf, + const float** ord_values, const int** missing, int* sample_buf = 0 ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_vectors( const CvMat* _subsample_idx, float* values, uchar* missing, + float* responses, bool get_class_idx=false ); + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + const CvMat* missing_mask; +}; + +class CV_EXPORTS CvForestERTree : public CvForestTree +{ +protected: + virtual double calc_node_dir( CvDTreeNode* node ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void split_node_data( CvDTreeNode* n ); 
+}; + +class CV_EXPORTS_W CvERTrees : public CvRTrees +{ +public: + CV_WRAP CvERTrees(); + virtual ~CvERTrees(); + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams()); + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams()); + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); +protected: + virtual std::string getName() const; + virtual bool grow_forest( const CvTermCriteria term_crit ); +}; + + +/****************************************************************************************\ +* Boosted tree classifier * +\****************************************************************************************/ + +struct CV_EXPORTS_W_MAP CvBoostParams : public CvDTreeParams +{ + CV_PROP_RW int boost_type; + CV_PROP_RW int weak_count; + CV_PROP_RW int split_criteria; + CV_PROP_RW double weight_trim_rate; + + CvBoostParams(); + CvBoostParams( int boost_type, int weak_count, double weight_trim_rate, + int max_depth, bool use_surrogates, const float* priors ); +}; + + +class CvBoost; + +class CV_EXPORTS CvBoostTree: public CvDTree +{ +public: + CvBoostTree(); + virtual ~CvBoostTree(); + + virtual bool train( CvDTreeTrainData* trainData, + const CvMat* subsample_idx, CvBoost* ensemble ); + + virtual void scale( double s ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvBoost* ensemble, CvDTreeTrainData* _data ); + virtual void clear(); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* 
missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + + virtual void try_split_node( CvDTreeNode* n ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void calc_node_value( CvDTreeNode* n ); + virtual double calc_node_dir( CvDTreeNode* n ); + + CvBoost* ensemble; +}; + + +class CV_EXPORTS_W CvBoost : public CvStatModel +{ +public: + // Boosting type + enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 }; + + // Splitting criteria + enum { DEFAULT=0, GINI=1, MISCLASS=3, SQERR=4 }; + + CV_WRAP CvBoost(); + virtual ~CvBoost(); + + CvBoost( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams params=CvBoostParams() ); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams 
params=CvBoostParams(), + bool update=false ); + + virtual bool train( CvMLData* data, + CvBoostParams params=CvBoostParams(), + bool update=false ); + + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, + bool raw_mode=false, bool return_sum=false ) const; + + CV_WRAP CvBoost( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice=cv::Range::all(), bool rawMode=false, + bool returnSum=false ) const; + + virtual float calc_error( CvMLData* _data, int type , std::vector *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + CV_WRAP virtual void prune( CvSlice slice ); + + CV_WRAP virtual void clear(); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + virtual const CvMat* get_active_vars(bool absolute_idx=true); + + CvSeq* get_weak_predictors(); + + CvMat* get_weights(); + CvMat* get_subtree_weights(); + CvMat* get_weak_response(); + const CvBoostParams& get_params() const; + const CvDTreeTrainData* get_data() const; + +protected: + + virtual bool set_params( const CvBoostParams& params ); + virtual void update_weights( CvBoostTree* tree ); + virtual void trim_weights(); + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* 
node ); + + CvDTreeTrainData* data; + CvBoostParams params; + CvSeq* weak; + + CvMat* active_vars; + CvMat* active_vars_abs; + bool have_active_cat_vars; + + CvMat* orig_response; + CvMat* sum_response; + CvMat* weak_eval; + CvMat* subsample_mask; + CvMat* weights; + CvMat* subtree_weights; + bool have_subsample; +}; + + +/****************************************************************************************\ +* Gradient Boosted Trees * +\****************************************************************************************/ + +// DataType: STRUCT CvGBTreesParams +// Parameters of GBT (Gradient Boosted trees model), including single +// tree settings and ensemble parameters. +// +// weak_count - count of trees in the ensemble +// loss_function_type - loss function used for ensemble training +// subsample_portion - portion of whole training set used for +// every single tree training. +// subsample_portion value is in (0.0, 1.0]. +// subsample_portion == 1.0 when whole dataset is +// used on each step. Count of sample used on each +// step is computed as +// int(total_samples_count * subsample_portion). +// shrinkage - regularization parameter. +// Each tree prediction is multiplied on shrinkage value. + + +struct CV_EXPORTS_W_MAP CvGBTreesParams : public CvDTreeParams +{ + CV_PROP_RW int weak_count; + CV_PROP_RW int loss_function_type; + CV_PROP_RW float subsample_portion; + CV_PROP_RW float shrinkage; + + CvGBTreesParams(); + CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage, + float subsample_portion, int max_depth, bool use_surrogates ); +}; + +// DataType: CLASS CvGBTrees +// Gradient Boosting Trees (GBT) algorithm implementation. +// +// data - training dataset +// params - parameters of the CvGBTrees +// weak - array[0..(class_count-1)] of CvSeq +// for storing tree ensembles +// orig_response - original responses of the training set samples +// sum_response - predicitons of the current model on the training dataset. 
+// this matrix is updated on every iteration. +// sum_response_tmp - predicitons of the model on the training set on the next +// step. On every iteration values of sum_responses_tmp are +// computed via sum_responses values. When the current +// step is complete sum_response values become equal to +// sum_responses_tmp. +// sampleIdx - indices of samples used for training the ensemble. +// CvGBTrees training procedure takes a set of samples +// (train_data) and a set of responses (responses). +// Only pairs (train_data[i], responses[i]), where i is +// in sample_idx are used for training the ensemble. +// subsample_train - indices of samples used for training a single decision +// tree on the current step. This indices are countered +// relatively to the sample_idx, so that pairs +// (train_data[sample_idx[i]], responses[sample_idx[i]]) +// are used for training a decision tree. +// Training set is randomly splited +// in two parts (subsample_train and subsample_test) +// on every iteration accordingly to the portion parameter. +// subsample_test - relative indices of samples from the training set, +// which are not used for training a tree on the current +// step. +// missing - mask of the missing values in the training set. This +// matrix has the same size as train_data. 1 - missing +// value, 0 - not a missing value. +// class_labels - output class labels map. +// rng - random number generator. Used for spliting the +// training set. +// class_count - count of output classes. +// class_count == 1 in the case of regression, +// and > 1 in the case of classification. +// delta - Huber loss function parameter. +// base_value - start point of the gradient descent procedure. +// model prediction is +// f(x) = f_0 + sum_{i=1..weak_count-1}(f_i(x)), where +// f_0 is the base value. + + + +class CV_EXPORTS_W CvGBTrees : public CvStatModel +{ +public: + + /* + // DataType: ENUM + // Loss functions implemented in CvGBTrees. 
+ // + // SQUARED_LOSS + // problem: regression + // loss = (x - x')^2 + // + // ABSOLUTE_LOSS + // problem: regression + // loss = abs(x - x') + // + // HUBER_LOSS + // problem: regression + // loss = delta*( abs(x - x') - delta/2), if abs(x - x') > delta + // 1/2*(x - x')^2, if abs(x - x') <= delta, + // where delta is the alpha-quantile of pseudo responses from + // the training set. + // + // DEVIANCE_LOSS + // problem: classification + // + */ + enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS}; + + + /* + // Default constructor. Creates a model only (without training). + // Should be followed by one form of the train(...) function. + // + // API + // CvGBTrees(); + + // INPUT + // OUTPUT + // RESULT + */ + CV_WRAP CvGBTrees(); + + + /* + // Full form constructor. Creates a gradient boosting model and does the + // train. + // + // API + // CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. 
+ // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // OUTPUT + // RESULT + */ + CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + + /* + // Destructor. + */ + virtual ~CvGBTrees(); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. + // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. 
+ */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ) {return false;}; + + // INPUT + // data - training set. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. + */ + virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Response value prediction + // + // API + // virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Response value prediction. 
+ // Parallel version (in the case of TBB existence) + // + // API + // virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Deletes all the data. + // + // API + // virtual void clear(); + + // INPUT + // OUTPUT + // delete data, weak, orig_response, sum_response, + // weak_eval, subsample_train, subsample_test, + // sample_idx, missing, lass_labels + // delta = 0.0 + // RESULT + */ + CV_WRAP virtual void clear(); + + /* + // Compute error on the train/test set. + // + // API + // virtual float calc_error( CvMLData* _data, int type, + // std::vector *resp = 0 ); + // + // INPUT + // data - dataset + // type - defines which error is to compute: train (CV_TRAIN_ERROR) or + // test (CV_TEST_ERROR). + // OUTPUT + // resp - vector of predicitons + // RESULT + // Error value. + */ + virtual float calc_error( CvMLData* _data, int type, + std::vector *resp = 0 ); + + /* + // + // Write parameters of the gtb model and data. Write learned model. 
+ // + // API + // virtual void write( CvFileStorage* fs, const char* name ) const; + // + // INPUT + // fs - file storage to read parameters from. + // name - model name. + // OUTPUT + // RESULT + */ + virtual void write( CvFileStorage* fs, const char* name ) const; + + + /* + // + // Read parameters of the gtb model and data. Read learned model. + // + // API + // virtual void read( CvFileStorage* fs, CvFileNode* node ); + // + // INPUT + // fs - file storage to read parameters from. + // node - file node. + // OUTPUT + // RESULT + */ + virtual void read( CvFileStorage* fs, CvFileNode* node ); + + + // new-style C++ interface + CV_WRAP CvGBTrees( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice = cv::Range::all(), + int k=-1 ) const; + +protected: + + /* + // Compute the gradient vector components. + // + // API + // virtual void find_gradient( const int k = 0); + + // INPUT + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes components of data->responses + // which correspond to samples used for training + // on the current step. + // RESULT + */ + virtual void find_gradient( const int k = 0); + + + /* + // + // Change values in tree leaves according to the used loss function. 
+ // + // API + // virtual void change_values(CvDTree* tree, const int k = 0); + // + // INPUT + // tree - decision tree to change. + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes 'value' fields of the trees' leaves. + // changes sum_response_tmp. + // RESULT + */ + virtual void change_values(CvDTree* tree, const int k = 0); + + + /* + // + // Find optimal constant prediction value according to the used loss + // function. + // The goal is to find a constant which gives the minimal summary loss + // on the _Idx samples. + // + // API + // virtual float find_optimal_value( const CvMat* _Idx ); + // + // INPUT + // _Idx - indices of the samples from the training set. + // OUTPUT + // RESULT + // optimal constant value. + */ + virtual float find_optimal_value( const CvMat* _Idx ); + + + /* + // + // Randomly split the whole training set in two parts according + // to params.portion. + // + // API + // virtual void do_subsample(); + // + // INPUT + // OUTPUT + // subsample_train - indices of samples used for training + // subsample_test - indices of samples used for test + // RESULT + */ + virtual void do_subsample(); + + + /* + // + // Internal recursive function giving an array of subtree tree leaves. + // + // API + // void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + // + // INPUT + // node - current leaf. + // OUTPUT + // count - count of leaves in the subtree. + // leaves - array of pointers to leaves. + // RESULT + */ + void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + + + /* + // + // Get leaves of the tree. + // + // API + // CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + // + // INPUT + // dtree - decision tree. + // OUTPUT + // len - count of the leaves. + // RESULT + // CvDTreeNode** - array of pointers to leaves. + */ + CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + + + /* + // + // Is it a regression or a classification. 
+ // + // API + // bool problem_type(); + // + // INPUT + // OUTPUT + // RESULT + // false if it is a classification problem, + // true - if regression. + */ + virtual bool problem_type() const; + + + /* + // + // Write parameters of the gtb model. + // + // API + // virtual void write_params( CvFileStorage* fs ) const; + // + // INPUT + // fs - file storage to write parameters to. + // OUTPUT + // RESULT + */ + virtual void write_params( CvFileStorage* fs ) const; + + + /* + // + // Read parameters of the gtb model and data. + // + // API + // virtual void read_params( CvFileStorage* fs ); + // + // INPUT + // fs - file storage to read parameters from. + // OUTPUT + // params - parameters of the gtb model. + // data - contains information about the structure + // of the data set (count of variables, + // their types, etc.). + // class_labels - output class labels map. + // RESULT + */ + virtual void read_params( CvFileStorage* fs, CvFileNode* fnode ); + int get_len(const CvMat* mat) const; + + + CvDTreeTrainData* data; + CvGBTreesParams params; + + CvSeq** weak; + CvMat* orig_response; + CvMat* sum_response; + CvMat* sum_response_tmp; + CvMat* sample_idx; + CvMat* subsample_train; + CvMat* subsample_test; + CvMat* missing; + CvMat* class_labels; + + cv::RNG* rng; + + int class_count; + float delta; + float base_value; + +}; + + + +/****************************************************************************************\ +* Artificial Neural Networks (ANN) * +\****************************************************************************************/ + +/////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// + +struct CV_EXPORTS_W_MAP CvANN_MLP_TrainParams +{ + CvANN_MLP_TrainParams(); + CvANN_MLP_TrainParams( CvTermCriteria term_crit, int train_method, + double param1, double param2=0 ); + ~CvANN_MLP_TrainParams(); + + enum { BACKPROP=0, RPROP=1 }; + + CV_PROP_RW CvTermCriteria term_crit; + CV_PROP_RW int train_method; + + // 
backpropagation parameters + CV_PROP_RW double bp_dw_scale, bp_moment_scale; + + // rprop parameters + CV_PROP_RW double rp_dw0, rp_dw_plus, rp_dw_minus, rp_dw_min, rp_dw_max; +}; + + +class CV_EXPORTS_W CvANN_MLP : public CvStatModel +{ +public: + CV_WRAP CvANN_MLP(); + CvANN_MLP( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual ~CvANN_MLP(); + + virtual void create( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual int train( const CvMat* inputs, const CvMat* outputs, + const CvMat* sampleWeights, const CvMat* sampleIdx=0, + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + virtual float predict( const CvMat* inputs, CV_OUT CvMat* outputs ) const; + + CV_WRAP CvANN_MLP( const cv::Mat& layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual void create( const cv::Mat& layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual int train( const cv::Mat& inputs, const cv::Mat& outputs, + const cv::Mat& sampleWeights, const cv::Mat& sampleIdx=cv::Mat(), + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + + CV_WRAP virtual float predict( const cv::Mat& inputs, CV_OUT cv::Mat& outputs ) const; + + CV_WRAP virtual void clear(); + + // possible activation functions + enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 }; + + // available training flags + enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 }; + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* storage, const char* name ) const; + + int get_layer_count() { return layer_sizes ? 
layer_sizes->cols : 0; } + const CvMat* get_layer_sizes() { return layer_sizes; } + double* get_weights(int layer) + { + return layer_sizes && weights && + (unsigned)layer <= (unsigned)layer_sizes->cols ? weights[layer] : 0; + } + + virtual void calc_activ_func_deriv( CvMat* xf, CvMat* deriv, const double* bias ) const; + +protected: + + virtual bool prepare_to_train( const CvMat* _inputs, const CvMat* _outputs, + const CvMat* _sample_weights, const CvMat* sampleIdx, + CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags ); + + // sequential random backpropagation + virtual int train_backprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + // RPROP algorithm + virtual int train_rprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + virtual void calc_activ_func( CvMat* xf, const double* bias ) const; + virtual void set_activ_func( int _activ_func=SIGMOID_SYM, + double _f_param1=0, double _f_param2=0 ); + virtual void init_weights(); + virtual void scale_input( const CvMat* _src, CvMat* _dst ) const; + virtual void scale_output( const CvMat* _src, CvMat* _dst ) const; + virtual void calc_input_scale( const CvVectors* vecs, int flags ); + virtual void calc_output_scale( const CvVectors* vecs, int flags ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvMat* layer_sizes; + CvMat* wbuf; + CvMat* sample_weights; + double** weights; + double f_param1, f_param2; + double min_val, max_val, min_val1, max_val1; + int activ_func; + int max_count, max_buf_sz; + CvANN_MLP_TrainParams params; + cv::RNG* rng; +}; + +/****************************************************************************************\ +* Auxilary functions declarations * +\****************************************************************************************/ + +/* Generates from multivariate normal distribution, where - is an + average row vector, - symmetric covariation matrix */ 
+CVAPI(void) cvRandMVNormal( CvMat* mean, CvMat* cov, CvMat* sample, + CvRNG* rng CV_DEFAULT(0) ); + +/* Generates sample from gaussian mixture distribution */ +CVAPI(void) cvRandGaussMixture( CvMat* means[], + CvMat* covs[], + float weights[], + int clsnum, + CvMat* sample, + CvMat* sampClasses CV_DEFAULT(0) ); + +#define CV_TS_CONCENTRIC_SPHERES 0 + +/* creates test set */ +CVAPI(void) cvCreateTestSet( int type, CvMat** samples, + int num_samples, + int num_features, + CvMat** responses, + int num_classes, ... ); + +/****************************************************************************************\ +* Data * +\****************************************************************************************/ + +#define CV_COUNT 0 +#define CV_PORTION 1 + +struct CV_EXPORTS CvTrainTestSplit +{ + CvTrainTestSplit(); + CvTrainTestSplit( int train_sample_count, bool mix = true); + CvTrainTestSplit( float train_sample_portion, bool mix = true); + + union + { + int count; + float portion; + } train_sample_part; + int train_sample_part_mode; + + bool mix; +}; + +class CV_EXPORTS CvMLData +{ +public: + CvMLData(); + virtual ~CvMLData(); + + // returns: + // 0 - OK + // -1 - file can not be opened or is not correct + int read_csv( const char* filename ); + + const CvMat* get_values() const; + const CvMat* get_responses(); + const CvMat* get_missing() const; + + void set_response_idx( int idx ); // old response become predictors, new response_idx = idx + // if idx < 0 there will be no response + int get_response_idx() const; + + void set_train_test_split( const CvTrainTestSplit * spl ); + const CvMat* get_train_sample_idx() const; + const CvMat* get_test_sample_idx() const; + void mix_train_and_test_idx(); + + const CvMat* get_var_idx(); + void chahge_var_idx( int vi, bool state ); // misspelled (saved for back compitability), + // use change_var_idx + void change_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor + + const CvMat* get_var_types(); 
+ int get_var_type( int var_idx ) const; + // following 2 methods enable to change vars type + // use these methods to assign CV_VAR_CATEGORICAL type for categorical variable + // with numerical labels; in the other cases var types are correctly determined automatically + void set_var_types( const char* str ); // str examples: + // "ord[0-17],cat[18]", "ord[0,2,4,10-12], cat[1,3,5-9,13,14]", + // "cat", "ord" (all vars are categorical/ordered) + void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL } + + void set_delimiter( char ch ); + char get_delimiter() const; + + void set_miss_ch( char ch ); + char get_miss_ch() const; + + const std::map& get_class_labels_map() const; + +protected: + virtual void clear(); + + void str_to_flt_elem( const char* token, float& flt_elem, int& type); + void free_train_test_idx(); + + char delimiter; + char miss_ch; + //char flt_separator; + + CvMat* values; + CvMat* missing; + CvMat* var_types; + CvMat* var_idx_mask; + + CvMat* response_out; // header + CvMat* var_idx_out; // mat + CvMat* var_types_out; // mat + + int response_idx; + + int train_sample_count; + bool mix; + + int total_class_count; + std::map class_map; + + CvMat* train_sample_idx; + CvMat* test_sample_idx; + int* sample_idx; // data of train_sample_idx and test_sample_idx + + cv::RNG* rng; +}; + + +namespace cv +{ + +typedef CvStatModel StatModel; +typedef CvParamGrid ParamGrid; +typedef CvNormalBayesClassifier NormalBayesClassifier; +typedef CvKNearest KNearest; +typedef CvSVMParams SVMParams; +typedef CvSVMKernel SVMKernel; +typedef CvSVMSolver SVMSolver; +typedef CvSVM SVM; +typedef CvDTreeParams DTreeParams; +typedef CvMLData TrainData; +typedef CvDTree DecisionTree; +typedef CvForestTree ForestTree; +typedef CvRTParams RandomTreeParams; +typedef CvRTrees RandomTrees; +typedef CvERTreeTrainData ERTreeTRainData; +typedef CvForestERTree ERTree; +typedef CvERTrees ERTrees; +typedef CvBoostParams BoostParams; +typedef 
CvBoostTree BoostTree; +typedef CvBoost Boost; +typedef CvANN_MLP_TrainParams ANN_MLP_TrainParams; +typedef CvANN_MLP NeuralNet_MLP; +typedef CvGBTreesParams GradientBoostingTreeParams; +typedef CvGBTrees GradientBoostingTrees; + +template<> CV_EXPORTS void Ptr::delete_obj(); + +CV_EXPORTS bool initModule_ml(void); + +} + +#endif // __cplusplus +#endif // __OPENCV_ML_HPP__ + +/* End of file. */ diff --git a/OpenCV/Headers/nonfree/features2d.hpp b/OpenCV/Headers/nonfree/features2d.hpp new file mode 100644 index 0000000000..f23bec8bc3 --- /dev/null +++ b/OpenCV/Headers/nonfree/features2d.hpp @@ -0,0 +1,155 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__ +#define __OPENCV_NONFREE_FEATURES_2D_HPP__ + +#include "opencv2/features2d/features2d.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +/*! + SIFT implementation. + + The class implements SIFT algorithm by D. Lowe. +*/ +class CV_EXPORTS_W SIFT : public Feature2D +{ +public: + CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3, + double contrastThreshold=0.04, double edgeThreshold=10, + double sigma=1.6); + + //! returns the descriptor size in floats (128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! finds the keypoints using SIFT algorithm + void operator()(InputArray img, InputArray mask, + vector& keypoints) const; + //! finds the keypoints and computes descriptors for them using SIFT algorithm. + //! 
Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + void buildGaussianPyramid( const Mat& base, vector& pyr, int nOctaves ) const; + void buildDoGPyramid( const vector& pyr, vector& dogpyr ) const; + void findScaleSpaceExtrema( const vector& gauss_pyr, const vector& dog_pyr, + vector& keypoints ) const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW double contrastThreshold; + CV_PROP_RW double edgeThreshold; + CV_PROP_RW double sigma; +}; + +typedef SIFT SiftFeatureDetector; +typedef SIFT SiftDescriptorExtractor; + +/*! + SURF implementation. + + The class implements SURF algorithm by H. Bay et al. + */ +class CV_EXPORTS_W SURF : public Feature2D +{ +public: + //! the default constructor + CV_WRAP SURF(); + //! the full constructor taking all the necessary parameters + explicit CV_WRAP SURF(double hessianThreshold, + int nOctaves=4, int nOctaveLayers=2, + bool extended=true, bool upright=false); + + //! returns the descriptor size in float's (64 or 128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! finds the keypoints using fast hessian detector used in SURF + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints) const; + //! finds the keypoints and computes their descriptors. 
Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + CV_PROP_RW double hessianThreshold; + CV_PROP_RW int nOctaves; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW bool extended; + CV_PROP_RW bool upright; + +protected: + + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; +}; + +typedef SURF SurfFeatureDetector; +typedef SURF SurfDescriptorExtractor; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/OpenCV/Headers/nonfree/nonfree.hpp b/OpenCV/Headers/nonfree/nonfree.hpp new file mode 100644 index 0000000000..c64c566d38 --- /dev/null +++ b/OpenCV/Headers/nonfree/nonfree.hpp @@ -0,0 +1,57 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_HPP__ +#define __OPENCV_NONFREE_HPP__ + +#include "opencv2/nonfree/features2d.hpp" + +namespace cv +{ + +CV_EXPORTS_W bool initModule_nonfree(); + +} + +#endif + +/* End of file. */ diff --git a/OpenCV/Headers/objdetect/objdetect.hpp b/OpenCV/Headers/objdetect/objdetect.hpp new file mode 100644 index 0000000000..8d7efb0ba4 --- /dev/null +++ b/OpenCV/Headers/objdetect/objdetect.hpp @@ -0,0 +1,1055 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OBJDETECT_HPP__ +#define __OPENCV_OBJDETECT_HPP__ + +#include "opencv2/core/core.hpp" + +#ifdef __cplusplus +#include +#include + +extern "C" { +#endif + +/****************************************************************************************\ +* Haar-like Object Detection functions * +\****************************************************************************************/ + +#define CV_HAAR_MAGIC_VAL 0x42500000 +#define CV_TYPE_NAME_HAAR "opencv-haar-classifier" + +#define CV_IS_HAAR_CLASSIFIER( haar ) \ + ((haar) != NULL && \ + (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL) + +#define CV_HAAR_FEATURE_MAX 3 + +typedef struct CvHaarFeature +{ + int tilted; + struct + { + CvRect r; + float weight; + } rect[CV_HAAR_FEATURE_MAX]; +} CvHaarFeature; + +typedef struct CvHaarClassifier +{ + int count; + CvHaarFeature* haar_feature; + float* threshold; + int* left; + int* right; + float* alpha; +} CvHaarClassifier; + +typedef struct CvHaarStageClassifier +{ + int count; + float threshold; + CvHaarClassifier* classifier; + + int next; + int child; + int parent; +} CvHaarStageClassifier; + +typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade; + +typedef struct CvHaarClassifierCascade +{ + int flags; + int count; + CvSize orig_window_size; + CvSize real_window_size; + double scale; + CvHaarStageClassifier* stage_classifier; + CvHidHaarClassifierCascade* hid_cascade; +} CvHaarClassifierCascade; + +typedef struct CvAvgComp +{ + CvRect rect; + int neighbors; +} CvAvgComp; + +/* Loads haar classifier cascade from a directory. 
+ It is obsolete: convert your cascade to xml and use cvLoad instead */ +CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade( + const char* directory, CvSize orig_window_size); + +CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade ); + +#define CV_HAAR_DO_CANNY_PRUNING 1 +#define CV_HAAR_SCALE_IMAGE 2 +#define CV_HAAR_FIND_BIGGEST_OBJECT 4 +#define CV_HAAR_DO_ROUGH_SEARCH 8 + +//CVAPI(CvSeq*) cvHaarDetectObjectsForROC( const CvArr* image, +// CvHaarClassifierCascade* cascade, CvMemStorage* storage, +// CvSeq** rejectLevels, CvSeq** levelWeightds, +// double scale_factor CV_DEFAULT(1.1), +// int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), +// CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)), +// bool outputRejectLevels = false ); + + +CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0))); + +/* sets images for haar classifier cascade */ +CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, + const CvArr* sum, const CvArr* sqsum, + const CvArr* tilted_sum, double scale ); + +/* runs the cascade on the specified window */ +CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, + CvPoint pt, int start_stage CV_DEFAULT(0)); + + +/****************************************************************************************\ +* Latent SVM Object Detection functions * +\****************************************************************************************/ + +// DataType: STRUCT position +// Structure describes the position of the filter in the feature pyramid +// l - level in the feature pyramid +// (x, y) - coordinate in level l +typedef struct CvLSVMFilterPosition +{ + int x; + int y; + int l; +} 
CvLSVMFilterPosition; + +// DataType: STRUCT filterObject +// Description of the filter, which corresponds to the part of the object +// V - ideal (penalty = 0) position of the partial filter +// from the root filter position (V_i in the paper) +// penaltyFunction - vector describes penalty function (d_i in the paper) +// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2 +// FILTER DESCRIPTION +// Rectangular map (sizeX x sizeY), +// every cell stores feature vector (dimension = p) +// H - matrix of feature vectors +// to set and get feature vectors (i,j) +// used formula H[(j * sizeX + i) * p + k], where +// k - component of feature vector in cell (i, j) +// END OF FILTER DESCRIPTION +typedef struct CvLSVMFilterObject{ + CvLSVMFilterPosition V; + float fineFunction[4]; + int sizeX; + int sizeY; + int numFeatures; + float *H; +} CvLSVMFilterObject; + +// data type: STRUCT CvLatentSvmDetector +// structure contains internal representation of trained Latent SVM detector +// num_filters - total number of filters (root plus part) in model +// num_components - number of components in model +// num_part_filters - array containing number of part filters for each component +// filters - root and part filters for all model components +// b - biases for all model components +// score_threshold - confidence level threshold +typedef struct CvLatentSvmDetector +{ + int num_filters; + int num_components; + int* num_part_filters; + CvLSVMFilterObject** filters; + float* b; + float score_threshold; +} +CvLatentSvmDetector; + +// data type: STRUCT CvObjectDetection +// structure contains the bounding box and confidence level for detected object +// rect - bounding box for a detected object +// score - confidence level +typedef struct CvObjectDetection +{ + CvRect rect; + float score; +} CvObjectDetection; + +//////////////// Object Detection using Latent SVM ////////////// + + +/* +// load trained detector from a file +// +// API +// CvLatentSvmDetector* 
cvLoadLatentSvmDetector(const char* filename); +// INPUT +// filename - path to the file containing the parameters of + - trained Latent SVM detector +// OUTPUT +// trained Latent SVM detector in internal representation +*/ +CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename); + +/* +// release memory allocated for CvLatentSvmDetector structure +// +// API +// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector); +// INPUT +// detector - CvLatentSvmDetector structure to be released +// OUTPUT +*/ +CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector); + +/* +// find rectangular regions in the given image that are likely +// to contain objects and corresponding confidence levels +// +// API +// CvSeq* cvLatentSvmDetectObjects(const IplImage* image, +// CvLatentSvmDetector* detector, +// CvMemStorage* storage, +// float overlap_threshold = 0.5f, +// int numThreads = -1); +// INPUT +// image - image to detect objects in +// detector - Latent SVM detector in internal representation +// storage - memory storage to store the resultant sequence +// of the object candidate rectangles +// overlap_threshold - threshold for the non-maximum suppression algorithm + = 0.5f [here will be the reference to original paper] +// OUTPUT +// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures) +*/ +CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image, + CvLatentSvmDetector* detector, + CvMemStorage* storage, + float overlap_threshold CV_DEFAULT(0.5f), + int numThreads CV_DEFAULT(-1)); + +#ifdef __cplusplus +} + +CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + std::vector& rejectLevels, std::vector& levelWeightds, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)), + bool 
outputRejectLevels = false ); + +namespace cv +{ + +///////////////////////////// Object Detection //////////////////////////// + +/* + * This is a class wrapping up the structure CvLatentSvmDetector and functions working with it. + * The class goals are: + * 1) provide c++ interface; + * 2) make it possible to load and detect more than one class (model) unlike CvLatentSvmDetector. + */ +class CV_EXPORTS LatentSvmDetector +{ +public: + struct CV_EXPORTS ObjectDetection + { + ObjectDetection(); + ObjectDetection( const Rect& rect, float score, int classID=-1 ); + Rect rect; + float score; + int classID; + }; + + LatentSvmDetector(); + LatentSvmDetector( const vector& filenames, const vector& classNames=vector() ); + virtual ~LatentSvmDetector(); + + virtual void clear(); + virtual bool empty() const; + bool load( const vector& filenames, const vector& classNames=vector() ); + + virtual void detect( const Mat& image, + vector& objectDetections, + float overlapThreshold=0.5f, + int numThreads=-1 ); + + const vector& getClassNames() const; + size_t getClassCount() const; + +private: + vector detectors; + vector classNames; +}; + +CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, int groupThreshold, double eps=0.2); +CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, CV_OUT vector& weights, int groupThreshold, double eps=0.2); +CV_EXPORTS void groupRectangles( vector& rectList, int groupThreshold, double eps, vector* weights, vector* levelWeights ); +CV_EXPORTS void groupRectangles(vector& rectList, vector& rejectLevels, + vector& levelWeights, int groupThreshold, double eps=0.2); +CV_EXPORTS void groupRectangles_meanshift(vector& rectList, vector& foundWeights, vector& foundScales, + double detectThreshold = 0.0, Size winDetSize = Size(64, 128)); + + +class CV_EXPORTS FeatureEvaluator +{ +public: + enum { HAAR = 0, LBP = 1, HOG = 2 }; + virtual ~FeatureEvaluator(); + + virtual bool read(const FileNode& node); + virtual Ptr clone() 
const; + virtual int getFeatureType() const; + + virtual bool setImage(const Mat& img, Size origWinSize); + virtual bool setWindow(Point p); + + virtual double calcOrd(int featureIdx) const; + virtual int calcCat(int featureIdx) const; + + static Ptr create(int type); +}; + +template<> CV_EXPORTS void Ptr::delete_obj(); + +enum +{ + CASCADE_DO_CANNY_PRUNING=1, + CASCADE_SCALE_IMAGE=2, + CASCADE_FIND_BIGGEST_OBJECT=4, + CASCADE_DO_ROUGH_SEARCH=8 +}; + +class CV_EXPORTS_W CascadeClassifier +{ +public: + CV_WRAP CascadeClassifier(); + CV_WRAP CascadeClassifier( const string& filename ); + virtual ~CascadeClassifier(); + + CV_WRAP virtual bool empty() const; + CV_WRAP bool load( const string& filename ); + virtual bool read( const FileNode& node ); + CV_WRAP virtual void detectMultiScale( const Mat& image, + CV_OUT vector& objects, + double scaleFactor=1.1, + int minNeighbors=3, int flags=0, + Size minSize=Size(), + Size maxSize=Size() ); + + CV_WRAP virtual void detectMultiScale( const Mat& image, + CV_OUT vector& objects, + vector& rejectLevels, + vector& levelWeights, + double scaleFactor=1.1, + int minNeighbors=3, int flags=0, + Size minSize=Size(), + Size maxSize=Size(), + bool outputRejectLevels=false ); + + + bool isOldFormatCascade() const; + virtual Size getOriginalWindowSize() const; + int getFeatureType() const; + bool setImage( const Mat& ); + +protected: + //virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize, + // int stripSize, int yStep, double factor, vector& candidates ); + + virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize, + int stripSize, int yStep, double factor, vector& candidates, + vector& rejectLevels, vector& levelWeights, bool outputRejectLevels=false); + +protected: + enum { BOOST = 0 }; + enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2, + FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 }; + + friend class CascadeClassifierInvoker; + + template + friend int 
predictOrdered( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictCategorical( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictOrderedStump( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + bool setImage( Ptr& feval, const Mat& image); + virtual int runAt( Ptr& feval, Point pt, double& weight ); + + class Data + { + public: + struct CV_EXPORTS DTreeNode + { + int featureIdx; + float threshold; // for ordered features only + int left; + int right; + }; + + struct CV_EXPORTS DTree + { + int nodeCount; + }; + + struct CV_EXPORTS Stage + { + int first; + int ntrees; + float threshold; + }; + + bool read(const FileNode &node); + + bool isStumpBased; + + int stageType; + int featureType; + int ncategories; + Size origWinSize; + + vector stages; + vector classifiers; + vector nodes; + vector leaves; + vector subsets; + }; + + Data data; + Ptr featureEvaluator; + Ptr oldCascade; + +public: + class CV_EXPORTS MaskGenerator + { + public: + virtual ~MaskGenerator() {} + virtual cv::Mat generateMask(const cv::Mat& src)=0; + virtual void initializeMask(const cv::Mat& /*src*/) {}; + }; + void setMaskGenerator(Ptr maskGenerator); + Ptr getMaskGenerator(); + + void setFaceDetectionMaskGenerator(); + +protected: + Ptr maskGenerator; +}; + + +//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// + +// struct for detection region of interest (ROI) +struct DetectionROI +{ + // scale(size) of the bounding box + double scale; + // set of requrested locations to be evaluated + vector locations; + // vector that will contain confidence values for each location + vector confidences; +}; + +struct CV_EXPORTS_W HOGDescriptor +{ +public: + enum { L2Hys=0 }; + enum { DEFAULT_NLEVELS=64 }; + + CV_WRAP 
HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8), + cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1), + histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true), + nlevels(HOGDescriptor::DEFAULT_NLEVELS) + {} + + CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, + Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1, + int _histogramNormType=HOGDescriptor::L2Hys, + double _L2HysThreshold=0.2, bool _gammaCorrection=false, + int _nlevels=HOGDescriptor::DEFAULT_NLEVELS) + : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize), + nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma), + histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold), + gammaCorrection(_gammaCorrection), nlevels(_nlevels) + {} + + CV_WRAP HOGDescriptor(const String& filename) + { + load(filename); + } + + HOGDescriptor(const HOGDescriptor& d) + { + d.copyTo(*this); + } + + virtual ~HOGDescriptor() {} + + CV_WRAP size_t getDescriptorSize() const; + CV_WRAP bool checkDetectorSize() const; + CV_WRAP double getWinSigma() const; + + CV_WRAP virtual void setSVMDetector(InputArray _svmdetector); + + virtual bool read(FileNode& fn); + virtual void write(FileStorage& fs, const String& objname) const; + + CV_WRAP virtual bool load(const String& filename, const String& objname=String()); + CV_WRAP virtual void save(const String& filename, const String& objname=String()) const; + virtual void copyTo(HOGDescriptor& c) const; + + CV_WRAP virtual void compute(const Mat& img, + CV_OUT vector& descriptors, + Size winStride=Size(), Size padding=Size(), + const vector& locations=vector()) const; + //with found weights output + CV_WRAP virtual void detect(const Mat& img, CV_OUT vector& foundLocations, + CV_OUT vector& weights, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), + const vector& searchLocations=vector()) const; + //without 
found weights output + virtual void detect(const Mat& img, CV_OUT vector& foundLocations, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), + const vector& searchLocations=vector()) const; + //with result weights output + CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + CV_OUT vector& foundWeights, double hitThreshold=0, + Size winStride=Size(), Size padding=Size(), double scale=1.05, + double finalThreshold=2.0,bool useMeanshiftGrouping = false) const; + //without found weights output + virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), double scale=1.05, + double finalThreshold=2.0, bool useMeanshiftGrouping = false) const; + + CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs, + Size paddingTL=Size(), Size paddingBR=Size()) const; + + CV_WRAP static vector getDefaultPeopleDetector(); + CV_WRAP static vector getDaimlerPeopleDetector(); + + CV_PROP Size winSize; + CV_PROP Size blockSize; + CV_PROP Size blockStride; + CV_PROP Size cellSize; + CV_PROP int nbins; + CV_PROP int derivAperture; + CV_PROP double winSigma; + CV_PROP int histogramNormType; + CV_PROP double L2HysThreshold; + CV_PROP bool gammaCorrection; + CV_PROP vector svmDetector; + CV_PROP int nlevels; + + + // evaluate specified ROI and return confidence value for each location + void detectROI(const cv::Mat& img, const vector &locations, + CV_OUT std::vector& foundLocations, CV_OUT std::vector& confidences, + double hitThreshold = 0, cv::Size winStride = Size(), + cv::Size padding = Size()) const; + + // evaluate specified ROI and return confidence value for each location in multiple scales + void detectMultiScaleROI(const cv::Mat& img, + CV_OUT std::vector& foundLocations, + std::vector& locations, + double hitThreshold = 0, + int groupThreshold = 0) const; + + // read/parse Dalal's alt model file + void 
readALTModel(std::string modelfile); +}; + + +CV_EXPORTS_W void findDataMatrix(InputArray image, + CV_OUT vector& codes, + OutputArray corners=noArray(), + OutputArrayOfArrays dmtx=noArray()); +CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image, + const vector& codes, + InputArray corners); +} + +/****************************************************************************************\ +* Datamatrix * +\****************************************************************************************/ + +struct CV_EXPORTS CvDataMatrixCode { + char msg[4]; + CvMat *original; + CvMat *corners; +}; + +CV_EXPORTS std::deque cvFindDataMatrix(CvMat *im); + +/****************************************************************************************\ +* LINE-MOD * +\****************************************************************************************/ + +namespace cv { +namespace linemod { + +using cv::FileNode; +using cv::FileStorage; +using cv::Mat; +using cv::noArray; +using cv::OutputArrayOfArrays; +using cv::Point; +using cv::Ptr; +using cv::Rect; +using cv::Size; + +/// @todo Convert doxy comments to rst + +/** + * \brief Discriminant feature described by its location and label. + */ +struct CV_EXPORTS Feature +{ + int x; ///< x offset + int y; ///< y offset + int label; ///< Quantization + + Feature() : x(0), y(0), label(0) {} + Feature(int x, int y, int label); + + void read(const FileNode& fn); + void write(FileStorage& fs) const; +}; + +inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {} + +struct CV_EXPORTS Template +{ + int width; + int height; + int pyramid_level; + std::vector features; + + void read(const FileNode& fn); + void write(FileStorage& fs) const; +}; + +/** + * \brief Represents a modality operating over an image pyramid. + */ +class QuantizedPyramid +{ +public: + // Virtual destructor + virtual ~QuantizedPyramid() {} + + /** + * \brief Compute quantized image at current pyramid level for online detection. 
+ * + * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set, + * representing its classification. + */ + virtual void quantize(Mat& dst) const =0; + + /** + * \brief Extract most discriminant features at current pyramid level to form a new template. + * + * \param[out] templ The new template. + */ + virtual bool extractTemplate(Template& templ) const =0; + + /** + * \brief Go to the next pyramid level. + * + * \todo Allow pyramid scale factor other than 2 + */ + virtual void pyrDown() =0; + +protected: + /// Candidate feature with a score + struct Candidate + { + Candidate(int x, int y, int label, float score); + + /// Sort candidates with high score to the front + bool operator<(const Candidate& rhs) const + { + return score > rhs.score; + } + + Feature f; + float score; + }; + + /** + * \brief Choose candidate features so that they are not bunched together. + * + * \param[in] candidates Candidate features sorted by score. + * \param[out] features Destination vector of selected features. + * \param[in] num_features Number of candidates to select. + * \param[in] distance Hint for desired distance between features. + */ + static void selectScatteredFeatures(const std::vector& candidates, + std::vector& features, + size_t num_features, float distance); +}; + +inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {} + +/** + * \brief Interface for modalities that plug into the LINE template matching representation. + * + * \todo Max response, to allow optimization of summing (255/MAX) features as uint8 + */ +class CV_EXPORTS Modality +{ +public: + // Virtual destructor + virtual ~Modality() {} + + /** + * \brief Form a quantized image pyramid from a source image. + * + * \param[in] src The source image. Type depends on the modality. + * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero + * in quantized image and cannot be extracted as features. 
+ */ + Ptr process(const Mat& src, + const Mat& mask = Mat()) const + { + return processImpl(src, mask); + } + + virtual std::string name() const =0; + + virtual void read(const FileNode& fn) =0; + virtual void write(FileStorage& fs) const =0; + + /** + * \brief Create modality by name. + * + * The following modality types are supported: + * - "ColorGradient" + * - "DepthNormal" + */ + static Ptr create(const std::string& modality_type); + + /** + * \brief Load a modality from file. + */ + static Ptr create(const FileNode& fn); + +protected: + // Indirection is because process() has a default parameter. + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const =0; +}; + +/** + * \brief Modality that computes quantized gradient orientations from a color image. + */ +class CV_EXPORTS ColorGradient : public Modality +{ +public: + /** + * \brief Default constructor. Uses reasonable default parameter values. + */ + ColorGradient(); + + /** + * \brief Constructor. + * + * \param weak_threshold When quantizing, discard gradients with magnitude less than this. + * \param num_features How many features a template must contain. + * \param strong_threshold Consider as candidate features only gradients whose norms are + * larger than this. + */ + ColorGradient(float weak_threshold, size_t num_features, float strong_threshold); + + virtual std::string name() const; + + virtual void read(const FileNode& fn); + virtual void write(FileStorage& fs) const; + + float weak_threshold; + size_t num_features; + float strong_threshold; + +protected: + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const; +}; + +/** + * \brief Modality that computes quantized surface normals from a dense depth map. + */ +class CV_EXPORTS DepthNormal : public Modality +{ +public: + /** + * \brief Default constructor. Uses reasonable default parameter values. + */ + DepthNormal(); + + /** + * \brief Constructor. + * + * \param distance_threshold Ignore pixels beyond this distance. 
+ * \param difference_threshold When computing normals, ignore contributions of pixels whose + * depth difference with the central pixel is above this threshold. + * \param num_features How many features a template must contain. + * \param extract_threshold Consider as candidate feature only if there are no differing + * orientations within a distance of extract_threshold. + */ + DepthNormal(int distance_threshold, int difference_threshold, size_t num_features, + int extract_threshold); + + virtual std::string name() const; + + virtual void read(const FileNode& fn); + virtual void write(FileStorage& fs) const; + + int distance_threshold; + int difference_threshold; + size_t num_features; + int extract_threshold; + +protected: + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const; +}; + +/** + * \brief Debug function to colormap a quantized image for viewing. + */ +void colormap(const Mat& quantized, Mat& dst); + +/** + * \brief Represents a successful template match. + */ +struct CV_EXPORTS Match +{ + Match() + { + } + + Match(int x, int y, float similarity, const std::string& class_id, int template_id); + + /// Sort matches with high similarity to the front + bool operator<(const Match& rhs) const + { + // Secondarily sort on template_id for the sake of duplicate removal + if (similarity != rhs.similarity) + return similarity > rhs.similarity; + else + return template_id < rhs.template_id; + } + + bool operator==(const Match& rhs) const + { + return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id; + } + + int x; + int y; + float similarity; + std::string class_id; + int template_id; +}; + +inline Match::Match(int _x, int _y, float _similarity, const std::string& _class_id, int _template_id) + : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id) + { + } + +/** + * \brief Object detector using the LINE template matching algorithm with any set of + * modalities. 
+ */ +class CV_EXPORTS Detector +{ +public: + /** + * \brief Empty constructor, initialize with read(). + */ + Detector(); + + /** + * \brief Constructor. + * + * \param modalities Modalities to use (color gradients, depth normals, ...). + * \param T_pyramid Value of the sampling step T at each pyramid level. The + * number of pyramid levels is T_pyramid.size(). + */ + Detector(const std::vector< Ptr >& modalities, const std::vector& T_pyramid); + + /** + * \brief Detect objects by template matching. + * + * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid. + * + * \param sources Source images, one for each modality. + * \param threshold Similarity threshold, a percentage between 0 and 100. + * \param[out] matches Template matches, sorted by similarity score. + * \param class_ids If non-empty, only search for the desired object classes. + * \param[out] quantized_images Optionally return vector of quantized images. + * \param masks The masks for consideration during matching. The masks should be CV_8UC1 + * where 255 represents a valid pixel. If non-empty, the vector must be + * the same size as sources. Each element must be + * empty or the same size as its corresponding source. + */ + void match(const std::vector& sources, float threshold, std::vector& matches, + const std::vector& class_ids = std::vector(), + OutputArrayOfArrays quantized_images = noArray(), + const std::vector& masks = std::vector()) const; + + /** + * \brief Add new object template. + * + * \param sources Source images, one for each modality. + * \param class_id Object class ID. + * \param object_mask Mask separating object from background. + * \param[out] bounding_box Optionally return bounding box of the extracted features. + * + * \return Template ID, or -1 if failed to extract a valid template. 
+ */ + int addTemplate(const std::vector& sources, const std::string& class_id, + const Mat& object_mask, Rect* bounding_box = NULL); + + /** + * \brief Add a new object template computed by external means. + */ + int addSyntheticTemplate(const std::vector

X5S#+RMhH%aU^4_~LvStxkB8tw2rdaja2W(og5aqTJRO43u`?lf4g}AK z;6)I;6oOYk@G1zlLhyPB-UPul5L^er4G_E)g119(3k2_m;JpyMAA%1-@DT_;2EivG z_zVP}hu}+5U#~#$H3+^5!QBvi7lQjC_#Om5gy1I-{2GGaK%oVNX;7E}g_TfP3xxxr za2gcOfx>xEcqtUNLg95#xE%_2K;h$1xDN_HhQd!Fq(CShLK=ial@O|h&_D>yhR{L? zEr!q~5V{6J*FxwC2)ziQmm%~c^pK!O9P}6hJ;p+hanNHG^f(uKoDV%7h8|Btk7uFB z=g{L@=!$%2v~l!TyU zL>Nlyp=2VIEQOMlP_hb2ZibSLP;x7j?0}NzpyUN8`4~zLL&*^+?FOaUP?`&+qoH&X zlum)t1HV11*Pvm>AO(+1C#+}VHcETKv@Bl1)*#>l#Pe72~f5S z%1(!}Gob87D7yv9HbU96P__%oUW2kPq3nAo`w_}Bpu7OegHS#i$|phj6evFh$_134 z1?6`@`F&7+Ka{@=;qtejd^c2dh6+Da7*J6G6@8(iKUBAyk|T6_-K96;SaY zR6GF{PeH|AsQ3sfK7ooqpl4_3*%f-`K+h2LEP|esq33bXb0+j$2|dq-p63##=SJwc z6?)zcJ>P_$`=RGS=;eZ59_ZzRUir|g7bG=+g>)u7f`JK%a-9&!f<1H}rWA`g{O= zJJ}VCG>k5`t5{%yP&@g{ga`8D)gTZ{g*=j<IVEe-vtcP-{bN zBGk@?+J#WNI1IH9L+#U0`z#DFU_c5CNP__rV8ApOFarkM3f`c3|)eF!*&C{3Z-hVMrnj=>|g@V8|>O(gH(P!;nod zWHSso3`2f`A-}-T8W=hlh7N_HXTs2nVCW?f9=ZpHegH#1f?Hm74BHRG4#MzM7@h~iyTkBi7=AnqUjV~b!|+Wod@~IH5QcvR!w@a|MjK9>&}VV?Kg0hhWUtFt#_09SCCw!`KU8?3FO~Dj54djQs+}eg$~2El~U zFkviAxC$oR1QS-nghLRX@FPt4875AKiO0djnK1DonD``2d>STofk`?{GGWp*m^23_ z&4Wp|!K8a&(tR-LCz$Ml$udkH2$M&_ViVaisR@&HVE2&Vi7Q{6DNGfW*1QyXFGG?;oTOuZYX-UCy=gQ>qm1JFfBs7eIhSAW_01ZvhFc%u;L&FKsun-y+L&H*NSQUnb2sE4p4d+6`1<-IYH2e=5 z-iC$`q2Xg__zW7pfQCcRa0D8Dfrj6p(FKhPGIbZ9&c8qb2pbD;5jXuJp-Z-mCx(AWlz>!EQY zG=2k(zd}8WpE(sl^6MCw25_k_^<~e81%suzx+~>|bx0mPk^ISPC(Xs+9E77tFEvwP8CN1ko zXjz|@4Qbh!mKV|T5?bCw%e!fLFD={BvNJ8a(sDE{-=yUPT7E{$Z)v%TmV0PfK+7Up zRi#y3TGgl3W3+mXR;_6DKU&SE)m&N~pj9cYjwDE{Nm>)qnvvF=v=*dYLE4q1-9XyE zNxO-(TS&W&v^zpiqCpmh;# zD$}MGZR*hGa@t%=o9k)w7;T=TO)J_Ar_DIpyiS|%{JOLq3y-A zy_B}k)3yg~d(n0gZI{#bbJ}jCZ9Z-H(RM#=5771)ZBNjy0_`f%t_tm{(XJ-#>d>w! 
z?OM?8LfTzIyUS>oxSV#+(e4%6eMGx_+SjN3RkXi}_P5afcG}-X`+I5sAKItWz7y>; zY2Tgpy=dQ;_QPmDoc5z>|0?ZYr~L%lzeD>;w4Y7;<+NW(`&`=+*V!D zCSw{I*<`#&#w;@ClJPznACj?{jE~4zM#g7ktRUlSGQK6_doq3^<5x0r$yiIq1~N92 zv6YM+Wb7tmFBu2OI7~(n8Kq<#CF6vIjMHSCAv4HKBC|4?RmrSQW-T)7l9@tgDw$2l zY)<9{WL`|>pUM0ynSUemDl)Gj^LjFGAoE5tZz1z`GVdbuUNZkf=0jvYO6Gsbd@4ca zvt+g+vo)FR$m~dF2AN&R>`rDcGW(MG0-1xzJlFFQnZwB(N#+p+V z>D-LY=hOKwbiSO<57PNbIzLV49&{c+=RtISo6i5Eb2gn<(RnqU^XObm=aY0kO_v6A zX+f6@=yDTX?xxGVbm>8t0dyHem#K7_DWS`3x_n2M-{_J{m(z4jqH86(-b>d<>H0Wb zhthQvUB}S%OS=9**PrNGNY~?ZJxRCo>Gl`8T~4=$>Gm|;o~7Gpy1hxa33U66Zr{>v z72S3w=ys59hv|ME-Rsdkh3?nV{U*BKLibj5?@0GfbbpKPQ|bOcx_?9WpXvT9-Am|x zitc6fxR@S)qsRZz;{kd+L64{C(T5&G=rNQY+4Pu8kHma>d_#|)>G3N)4$|WfdK{%^ zQ+i%P&p*@iVR}AI&u8h`kDf#6`4TJizY6{9(7zu2uc7~q^uL+@UFhGJ{{87ch5j?>Ka2jW=)ao&dGt>d(fyAPcf`D!`d=z9>bO}>?4L9VOTlC z&hqljynGKY-^a_ZaPGe93Zoxn^plK!n$c4jJ%iD+7`>0t#f&ax%+-wf7h`T@OfSX^V$2Z6tV%Fu zHDmG^+la9jGWKG|zQouujD3}{YZ<$hvD%XxJruYSk4 z28?ULxC0unHLqpxT4!Ec&TA`q?K@tt%FEV+G?YGQI}mYcc*| z#y`#YXBq!GiSh3;elp{CGyX8+3wiTq-n@r5@8ivvcykPIzRH{Hcyk+X?%=H^ymc{e zUCLXXd8-d^_2aEoytSIQ@|bWX6aK-38<_AW6Q(d>8WVOh;UE(ZOL+TQ-oA;qZ{h7> zygin;$MJS9Z*S)9-+8Ao?_9(?m+($|-s#Fa-Fas|?=0n=k9p@f6EX2mOuUJScQf%` zCXQs{8%%tYiR+lSjfp#W_b0K=COyogN0`)#N$E_=WKvfq^|045D*(nuzaVbVAzy}_inn6#2fMNCdK zWAXz`9?ax-nLLxpvza`P$qSggkjYD!{5_L%nY@O{8=1U?$=jHGh{**^E@kpjCZAw( zIg`&a09nraaA*K1>&X5m+5aZ{X0mT1`%bd&A^U!^A0+z`vL7e=NwS|IyCvD_WVa=|1KFL(?o4(!vU`%< zhwT1j50sESgzOi|ewpl7$R17ht7N}U_M2qCP4>HFPa*q%Wap4QgY4O4&m(&Q*$c^D zLiWdGe@gb}WPeHaH)MZD_77zLO!jYN=aIdR?2Tk^Nszsb?44xqA$uR$2gxoVyO``h z$Ua8)NwUkxK1-+Nb2mBnk@EmK50mp4IZu%DG&#?alSWP(a@v#gJUN-Ax`jH>Mxw zJ-qiP-n&r3dzbUx-+1poy!T(;dy@BFGP5x=Z)4{D%zPlh%#O_L&di?7e1n;j zm^p=+%bB^7ncp#U4>JpxS;VYH%({?S7c=W#W07Kd}c3T_IhS-XZB8J zmoujlbCQ{JC3F74oEw<)U*@!8PC9djFy|HKjAG6#=6uMUMa;=%&SvKPes0h`#oP+a zt;pOPcU}~b6;WZDCW*%?h@vH#N74F-Ok*d%sY>H^_Z8!yvvz) zE%UBtUI*rNV_pyDy~(^O%$vr%FPZm)gn2(PZx8bdm{-L7hRi>o`4=+(U(COQ`FAnD zCG$Hl|9R%W#QZVLf0g-5ng0dzzhwR<=I>(u9^S9W`!#sK7Vlrf`#19b&Agw^`<-|{ 
zllO-wcz+!4zs~#fd4DPIf6V(^c>f&8UKad`1?RD#It%{Bg6moEPZqRhK?Vytv*0xr zOk}|%7JSNrZ&>gx3;tk184J$v!3BKqS3bCc4<6uyC-@-o6d&~AgCTq{ln)m2!KZxi z86WK8gF}2!z=zHF@KQd!j1M2;!>9Q089p4$ha>oKBp)v1!%z9}Gd|qQhedo?!oq4S ztk1#*EWC+@ceC(b7WO2ua3Bi@vv4*G7qW0M3)islcNT7AQ8J5av#2hM{>7p@SacVQ zGFjA{MSWQ`g+()1G>b()uqcm3Ygt^E#Z6e;jKwWk+=0c|o_mj24p4J_TnM;GzY6?}9hA9d%W7x-u(AFbk} z)qIr4$94F)F&{VO<4iv8&BuND_-j7?iI0Eblg4~<5uaSbCq4LN0G|xvlSO>8oKHUI zlSCQIDzdCH%kE{_qbz%zWv{VpBFiSRY#Yn=vFre!Ud5+3@aez#bO4_Y}w^42V$&hq&zU%>LcEI;S}E-&G;%lPbSKD&m`y6{1` z-{8whd^v?L*Yf37zTD1N7x2|z`RWS3>cUrj`Kmu(eZp5?@zpncb&{|D#MkHa|JM)j z^%H#k6kliY^&GyQ$2ZA*Q=4z<^35}R)0S`A^UeEw^AX>C!b+^H!pielc`GaLW95HX zIhB<&Svi}PMXWr*%2RxM2j4!xw-52{M7~XA^X+uL-N(1Zd|S$QxANV6eD@!|dzJ6r z=DUe}w}tQa@ZDZkUB{}MS#>L`MzLxM zOBH^p!!PytUFGM&*}}V-oxs>CxlfS$G`WfA z$W0@+4Y}>feV*J*a=Vh-gWTTa_9J%yxr50aO71XnN02*;+_B`oM(%iWCy+al+{xrl zBR8Ae_sE?^?lE#tl3PaZS@QlwUPTgl$>g0!UJdeUlUI+t2IMs&uPJ#g$h(ldOUS#7 zyvxbElDwqp)I@&=JNoV-`a`fEqUw7+fLq2)?CV(Ygls~YyQcaf3fBc*4)LK zcUd!&HM3bWk2UYJ<|Ed8!kS{%l(Xh6YyXsBZ4zs%v$iH{?`Q2(tbK;HEm@n!+UHrD z!P*?we!$v=tX;y|k662cwO_IJIO~#FSBZ61Sa%-l>ai|`bq}+yCF{~y*M@cNSl5Mh z-B_1c%(^dG_ciOjW!);)t!7;w>r+^NA?q(@{hwL?7uNrs_1Cg~AnQl6el+V}W&LZc zpUC=2tY6Lgt*qb9`dzHw!}YZ zt(DnYkF6%=+iSDEF56Ss-jMC* zv;9K0_hS1{w!g&o;cS0}?XRP581hao%!sl&aM=8 zwP4q!?7D_s*RktncHPFVJK1#)yY6S#zU+FDUE|pGI=kLw z*A#aBk6k(Jn!&CG>{`gKCG7f`U7xb+8+NT`*JgI@W7h$8m9pz7yH2p{G`r5QyE410 zvb#FFYq7g7yZ=UF_kY>lmfd66{T92YvU>)*7qNQ@yFX|5m+bzA-QThM2X^PNdmXzs zvU>}=x3T*qds?vPZ|r%3J!$NDo;`io(~mtv*)xniBiJ*FJ!9E3LBgJi?3v7-Y3#{n z&s_E_X3uBr`GGw@v1cuNHn3+id$zJ?2YU{%=P-MU*i*`$qwG0D{(0orC;tNSFCza6 z^8b(gzmtC*`Tr#UHuCQz|DFW-_mlr1`A?FcPJSoy`;yy1!1>_f#{|EVrW8|M@Z!&vp zv$q+0Td?;s_Fm53E7^NBd#`2hP3*muy?3zpZuZ{C-pAPc9DCcdw16Tgbi?_BChUzDL;iIQyPt z-!trM%f1fm>%_j!?CZw9(d=8uz7_1-$G&3rm9f7P`|Gm5KKq-o|9tje#Qsa!e{TPC z|KHhv9sB>u{(rOoX7)eB{ukN*8v8$H|Cj9lk^O7gzmxsD*?*Y*MeHwS|55gz;6QL7 zi3626P?ZDKIgrAE3psEF2X5fNzd3Lx2kzm({Rs{{$bm;V@C*l9av+@pZ8^|^1Kl_< 
zkORXxFrEW%abPM3vN$lE12Z`=hXac^u#^Lza9}wHzTm(n4&q=94qnH>n>qLZ2cP0# zdk%KwV4^z*dvUNY2VdaeAPx@a;7AUR;ovw9zQMuy99+Y}ogAvdp*kFD&Y{aVbRCEO z!J%6@bO(p-=Foi{dVoVuaOi0cJ;$Lm4z=OXU=F>R|9TeP6 z!F?1wMnM_{ohax_L4OKfq~K)=UZG$#1+P-@HU;leFolBuQIM0MU>*gZQ1C4Ut0-7a z!5RwIQ?QAG-zmtaU_S+iC@7?$go0BPCQ(?E!X^|pqwrD+|3cvv6#gHDf2Z)@6y8kX zZ4}-~;XM>SLSah^6VFrFhr)go4yAAyg(D~&Md4TqCr~(%!pRg)qcEGoxfFgx;R*_W zrtnt^H&D2l!mSkUpl~;Zhbb(gu$0216rP|cD5^nGDn*x&DEc!+S5b5gMb}ew14TDd zbQeYUQuH5+9-`<`ik_wDd5U^bG?b#3C>lf2IEvn&=q-xgp(u-@=@iYRXbwg5DOyU= zR}}q3(MF0kQ?y$`(O!xUP;{80B8twjJWbIVii6@LimOxHh~kSVzKY_%Q~Ym=Z>IP* zitnWO9*Q5K_;HG#r1%+%TTXf8V(wve%Q*tdO*Hdx} zCAU*@7bW*nlK2lL|E1(9N}i>p6(y}H$)Kb+B||6~L&>X@yiLiwluV)If0X1@+Bp|P_luNJ(TRFq==GIN{&);j__$plSq_SrnD-h)hVq-X)2``PrB3slbs+ z97*O#ZI0CC$ip0I$&oaUwBblQj&$KjH;$aVW zy@jKHi~0o zIX0VP3puu!V;^yB8OP4ue;@mWW2-r~hGXkE{xrwiaJ(DGdvLrr$NO=70LKS&{9TUc zaQr=v&*J!8jxXo<7aaeJ<10D-9VaSqq8cYsInkIC%{XyBCobZ|rJVRb5+|CG z2Pf|3#D6&P5GNkx#N(XU!HEN$IL3(+oUF*nWKN#P$r_xj&B=P4Y{1EjIC&{2|H8>D zIQf5^{5vPFdWKPIcwfbWVN9sYRSx%Bhbzb(m8pIdz&-XE+_4PU3WBPG89B z%M+ab8>g@0^fjEmm($O2x&x=5=X57dcjk0APWR;Wx%xLbJ(JUOI6aTkOE~={r@!TN zE~hth`VUT@qO2}umr-^dW&fb;UzFWM*{zh_LD{+azpN8wnUr;u%HD9XlC z_8Mj5Df=H~Ih4Ie*+R+|Q}z*M%P9MdvK5qlPuY)@{Y2SD$~IHBm9ia_?WSxmWkr;g zPTFNQC@@cnv~a}ygua(DZiNV>nXp1@*63?neuxmf0**eD1U`cp zC_hYj5#^ZXK=gdW%xs)^4aOQf>{F5_xapoS*+|QW@ zIr9i-9#3$l4QD!Y<^|3S&L%iJlCz^Z`zB}KK9_r!vr{;ld%hJuJuZ@3illUr z)JY_D7fF3Y(kPKMMkGxTNz+79mPnc|l4gjcMIvd5NcvVJeJ_%J5=p;`q+F4-RwQi@ zNt;E|R*_UFl1fC?SJq z6cu}mil2&#t3<^gM8%&)#a~6Gs-jW@Q7KhaY9cB%6O}F%l`a#N+KEa%M5SJ$QeROi z(O*=0QB)cxDt#&{trC@f5S4xwm3|eKYlzB?MdhZVatl%U0#Ui6sN7pr?kg$}6qSdF z$}ft_BShsu?4w0NBk}Hek>LNKsBsUkymy6`TiR8bFp_98h$BzFUAk=$P*k_U?97e(?&k^F{8o+OfIiR3vVd4Wh? zD3X_m8&|i7Ky)D&s|!38Kn7qDq#ik|U~Y6;%$3Du+dtB2lG8R5>N8l!>Z; z7gZCth^n`Vs&|U2cZ;eIi>i-`s$)deiK6NxQFW@Q`ae;1wx~K+RNX167K*CHqUs-_ z>QQmtU&VR<5a-<>&bv{Zce6O}C2`(s;=DJ+d2fmH-WJs^5Y?^})vglNZWD=WcZzEF zh-&wXY7dHPkBDlIi)v4bYR`yjnW9=(QLTrl)>~BTC#t<5stpp=hKOn}ifUhrYPq7? 
zQBl3NsD6v6-bPd(BC5YDs?QYFXN&5~MD@=^^%bJ}*P{Bj5>fqoQT-=T{Z~;vS5)66 zs^^R9`$Y8vqWWP`y+~9q71fW3>c>TmilRnCQR6C6qqV5fThy2?YJ4PWd@XAHCTir0 z8oNY|d{JY+sBuWtC=@kHM2#b&#yOl*2~qPrQL~1qSzFYsD{9skH5-bWjYZ97qUQOc z=H;U1oucMrqGl^mv$v@Ej;J|L)ciu!{7uw6C~75%TB)K|6H%*~sCB-mb&;rbqo|cG zYIPR1dWl*IQETM6cZ{etPSkos)Ou6Ynk{NA7qxyAwYG{{Cq=C?QR}Rz{U=eoqNrV2 z)c&WaeVeF#hp2tOsQsX*{fMajxTxJy)P7#n&J?w~irNE3?UzOE>7w>*QG1oBy;{^> zBNDaOi`rX7?Y*M*0a5#~sC`V-sUYf96m^nCo%2MUdZJE>sPl-Z^Ngs|UDW9#>WmR} z-V$|Ih&n4pogz^;S=6m6>Q)nVYl^ycMBT=sZc|aWg{XU>sC&7n`-DW)eOA;RF6xdG zb(e^`%S7GnqV8c)w@}nA5p|Do zqTVB--czF9v!Y%rQLnYA_q?c=De83*^}36CQ$)Qb2~qE7QE#27w?Wj~Eb46)^$v=9 zhef^9qTU%%KScc`QNOaNUscqvF6!43^_z?O7l`^7i~4^S_5UjB|4r1tO4Pqb)W22K ze@@hYUeq5X>Q5B)KN9u77WKD@`iVb8{bQp3Nm0K{)ITdyLZnm`Db++uU6GO^Qc^`q z6OnSUNcpo!`Kw5|MxhZvl=dR!d6Cjbr1TRhLq*Ck zkupN0j1q~Iu_9%HNSP>7CX1A5A|+d-%oQmgi?R=5)BrL2FpZ)m7>8`(cqwHa9A`b5)Dd4gQKFsanZ1zXn4M8c%f)`iD-D4Xn473 z_&3q;Hqr1Q(eM$`@Nv=bNzw2b(Xgdx*h4fNAQ}!54PO)uUrvaIuZV_kiH2{BhLc6Z zX`*4aXgE_eoFf{}7Y#oZ4Sx|0w}^&&M8gu%@DI`OglKqLG(00xD~QxeBDIQ0tuIoW ziPXP})GI{l-$d$_BK6-Q^(K*et4O^=q$chbsrQN0$3*IXMd~vmwWUZ+7pYxDYB!PE zTcq|AsRPb6h}4%v>Tr?znn;}}QfG+NSt515Nc}*hE)uC9i_}j=>gOW$H<7wgq~?p% zeIoUsNG%Yl#UhdVhe$0Gsb@r^3ZhXZ(Wr`Olp-266pfmSMlD363q_+pi$;GHjs7MY zT`3y6t8t)X1_lU;l>JN&>1)_1WX#9t0d`vW{m=H~pMU(SHlj@>L zP0^&bXi`r!X&{>1B$}j)CL=_XxuVG`(d3Y5az-?*ESgpkO{ZXf{tY`%pAnE}E?r%~pwKt3|Ut zqS+zQ?5t>BRW!dsG`~SK|F=jqe@HZcR5bsuX#SLF{;X);N;GdRnzs|pJBsERqWNIa ze5hzXOf(-MnvW9A$BO2!iRR-)^9iE)MA3Y)X#TZmzFssxD4L%XEozGv7mF6RiWX0Z z7F|S(zM{no647FSXfaH*cwMx3N3@tKTFewJ=7<(^MT^Cv#ZuAYBhg}o`2Uv9|Hbb) z4&aZ+nmMkNx=vT<{eHckuji+gxVos(A+=gvYNdplk(zBv$&h3pi)KS2B8wx^rs>D0>9bR;BNEvHR#S<8rd{POtq@gF?8dF(#L?jUwwV0Q?+@38v;yCc{g#cm0^ zW7sWY_iw;S>`q}nf&C=*>#^T}{l+2oo3XzG`#Z6}3;S)@KZyN9*guT@PV9GM{|fdC z*e_y#6#HfD|HS?z_NTCjWhItzEEP+`l2`^1q z5bHG7Td_Wc^);;T;IO`j^+T-tu+Cwf$9fp+k63@gdIalHtV>vrVLgGd3S$Ig6yqF> zb1}v+dW?#Z7^^WZ!ng!u5@QMX28>%UZo}AwaW}?27~3&+ zU_65HD8^$L&tN=<@dCz+7%yYIjxmcdhp`{yGmOtM4q|+XaTsF}<9m!>Fn+~2KE(JP 
zV;SQFwpG|hu$_hNY;5OYo50p#tJo6TYHX9(rm$UxZ4I`K*mhxi6Wd;F`>@Sno5ywl z+XA+)uq|Ty8rwJ6j$<4Ch3#+56_{sWj$n>rj$w{tUVzzQR?Nhl!n_poa?EMWt1;JN zuEU(cyczR$%x#$WVm^Sm9diffPR!kyPhjrBd=pY%I5-~%7vo?G2W$TOQm}HxX^$_R_WwAz;vWI2WflMc diff --git a/main.cpp b/main.cpp index 455ae556fe..21fa1edf39 100644 --- a/main.cpp +++ b/main.cpp @@ -55,6 +55,7 @@ using namespace std; // Junk for talking to the Serial Port int serial_on = 0; // Is serial connection on/off? System will try +int audio_on = 0; // Whether to turn on the audio support // Network Socket Stuff // For testing, add milliseconds of delay for received UDP packets @@ -89,7 +90,7 @@ Hand myHand(HAND_RADIUS, glm::vec3(0,1,1)); // My hand (used to manipulate things in world) glm::vec3 box(WORLD_SIZE,WORLD_SIZE,WORLD_SIZE); -ParticleSystem balls(1000, +ParticleSystem balls(0, box, false, // Wrap? 0.02, // Noise @@ -112,7 +113,7 @@ ParticleSystem balls(1000, #define RENDER_FRAME_MSECS 10 #define SLEEP 0 -#define NUM_TRIS 10 +#define NUM_TRIS 100000 struct { float vertices[NUM_TRIS * 3]; // float normals [NUM_TRIS * 3]; @@ -154,7 +155,8 @@ int display_hand = 0; int display_field = 0; int display_head_mouse = 1; // Display sample mouse pointer controlled by head movement -int head_mouse_x, head_mouse_y; +int head_mouse_x, head_mouse_y; +int head_lean_x, head_lean_y; int mouse_x, mouse_y; // Where is the mouse int mouse_pressed = 0; // true if mouse has been pressed (clear when finished) @@ -194,7 +196,7 @@ double elapsedTime; // 1. Add to the XCode project in the Resources/images group // (ensure "Copy file" is checked // 2. 
Add to the "Copy files" build phase in the project -char texture_filename[] = "int-texture256-v4.png"; +char texture_filename[] = "./int-texture256-v4.png"; unsigned int texture_width = 256; unsigned int texture_height = 256; @@ -261,8 +263,10 @@ void init(void) { int i, j; - Audio::init(); - printf( "Audio started.\n" ); + if (audio_on) { + Audio::init(); + printf( "Audio started.\n" ); + } // Clear serial channels for (i = i; i < NUM_CHANNELS; i++) @@ -273,6 +277,8 @@ void init(void) head_mouse_x = WIDTH/2; head_mouse_y = HEIGHT/2; + head_lean_x = WIDTH/2; + head_lean_y = HEIGHT/2; // Initialize Field values field_init(); @@ -346,7 +352,9 @@ void terminate () { // Close serial port //close(serial_fd); - Audio::terminate(); + if (audio_on) { + Audio::terminate(); + } exit(EXIT_SUCCESS); } @@ -421,6 +429,9 @@ void reset_sensors() fwd_vel = 0.0; head_mouse_x = WIDTH/2; head_mouse_y = HEIGHT/2; + head_lean_x = WIDTH/2; + head_lean_y = HEIGHT/2; + myHead.reset(); myHand.reset(); if (serial_on) read_sensors(1, &avg_adc_channels[0], &adc_channels[0]); @@ -431,12 +442,14 @@ void update_pos(float frametime) { float measured_pitch_rate = adc_channels[0] - avg_adc_channels[0]; float measured_yaw_rate = adc_channels[1] - avg_adc_channels[1]; - float measured_lateral_accel = adc_channels[2] - avg_adc_channels[2]; - float measured_fwd_accel = avg_adc_channels[3] - adc_channels[3]; + float measured_lateral_accel = adc_channels[3] - avg_adc_channels[3]; + float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; // Update avatar head position based on measured gyro rates - myHead.addYaw(measured_yaw_rate * 1.20 * frametime); - myHead.addPitch(measured_pitch_rate * -1.0 * frametime); + myHead.addYaw(measured_yaw_rate * 0.25 * frametime); + myHead.addPitch(measured_pitch_rate * -0.25 * frametime); + myHead.addLean(measured_lateral_accel * frametime * 0.05, measured_fwd_accel*frametime * 0.05); + // Decay avatar head back toward zero //pitch *= (1.f - 5.0*frametime); //yaw 
*= (1.f - 7.0*frametime); @@ -456,6 +469,7 @@ void update_pos(float frametime) head_mouse_y = min(head_mouse_y, HEIGHT); // Update hand/manipulator location for measured forces from serial channel + /* const float MIN_HAND_ACCEL = 30.0; const float HAND_FORCE_SCALE = 0.5; glm::vec3 hand_accel(-(avg_adc_channels[6] - adc_channels[6]), @@ -466,6 +480,7 @@ void update_pos(float frametime) { myHand.addVel(frametime*hand_accel*HAND_FORCE_SCALE); } + */ // Update render direction (pitch/yaw) based on measured gyro rates const int MIN_YAW_RATE = 300; @@ -596,6 +611,7 @@ void display(void) glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE ); glEnable( GL_POINT_SPRITE_ARB ); + if (!display_head) { glBegin( GL_POINTS ); { for (i = 0; i < NUM_TRIS; i++) @@ -606,9 +622,9 @@ void display(void) } } glEnd(); - - glDisable( GL_TEXTURE_2D ); + } glDisable( GL_POINT_SPRITE_ARB ); + glDisable( GL_TEXTURE_2D ); // Show field vectors if (display_field) field_render(); @@ -618,10 +634,10 @@ void display(void) if (display_hand) myHand.render(); - balls.render(); + if (!display_head) balls.render(); // Render the world box - render_world_box(); + if (!display_head) render_world_box(); glPopMatrix(); @@ -634,10 +650,10 @@ void display(void) glDisable(GL_LIGHTING); //drawvec3(100, 100, 0.15, 0, 1.0, 0, myHead.getPos(), 0, 1, 0); - + glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, pointer_attenuation_quadratic ); + if (mouse_pressed == 1) { - glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, pointer_attenuation_quadratic ); glPointSize( 10.0f ); glColor3f(1,1,1); //glEnable(GL_POINT_SMOOTH); @@ -648,7 +664,7 @@ void display(void) sprintf(val, "%d,%d", target_x, target_y); drawtext(target_x, target_y-20, 0.08, 0, 1.0, 0, val, 0, 1, 0); } - if (display_head_mouse) + if (display_head_mouse && !display_head) { glPointSize(10.0f); glColor4f(1.0, 1.0, 0.0, 0.8); @@ -737,7 +753,7 @@ void key(unsigned char k, int x, int y) float add[] = {0.001, 0.001, 0.001}; 
field_add(add, pos); } - if (k == 't') { + if ((k == 't') && (audio_on)) { Audio::writeTone(0, 400, 1.0f, 0.5f); } if (k == '1') From eb648d3d1f85aed5c18366df8280cb3c4814cfb7 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Sat, 17 Nov 2012 17:00:56 -0800 Subject: [PATCH 007/136] Partial commit to switch sprite rendering to new Cloud class --- cloud.cpp | 132 ++++++++++++++++++ cloud.h | 32 +++++ field.h | 11 -- head.cpp | 1 + interface.xcodeproj/project.pbxproj | 6 + .../UserInterfaceState.xcuserstate | Bin 101447 -> 101370 bytes main.cpp | 52 ++++--- 7 files changed, 204 insertions(+), 30 deletions(-) create mode 100644 cloud.cpp create mode 100644 cloud.h diff --git a/cloud.cpp b/cloud.cpp new file mode 100644 index 0000000000..a0bdcceec3 --- /dev/null +++ b/cloud.cpp @@ -0,0 +1,132 @@ +// +// cloud.cpp +// interface +// +// Created by Philip Rosedale on 11/17/12. +// Copyright (c) 2012 __MyCompanyName__. All rights reserved. +// + +#include +#include "cloud.h" +#include "util.h" + +Cloud::Cloud(int num, + glm::vec3 box, + int wrap) { + // Create and initialize particles + int i; + bounds = box; + count = num; + wrapBounds = wrap; + particles = new Particle[count]; + + for (i = 0; i < count; i++) { + particles[i].position.x = randFloat()*box.x; + particles[i].position.y = randFloat()*box.y; + particles[i].position.z = randFloat()*box.z; + + particles[i].velocity.x = 0; //randFloat() - 0.5; + particles[i].velocity.y = 0; //randFloat() - 0.5; + particles[i].velocity.z = 0; //randFloat() - 0.5; + + } +} + + +void Cloud::render() { + + float particle_attenuation_quadratic[] = { 0.0f, 0.0f, 2.0f }; + + glEnable( GL_TEXTURE_2D ); + glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); + glPointParameterfvARB( GL_POINT_DISTANCE_ATTENUATION_ARB, particle_attenuation_quadratic ); + + float maxSize = 0.0f; + glGetFloatv( GL_POINT_SIZE_MAX_ARB, &maxSize ); + glPointSize( maxSize ); + glPointParameterfARB( GL_POINT_SIZE_MAX_ARB, maxSize ); + glPointParameterfARB( 
GL_POINT_SIZE_MIN_ARB, 0.001f ); + + glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE ); + glEnable( GL_POINT_SPRITE_ARB ); + glBegin( GL_POINTS ); + for (int i = 0; i < count; i++) + { + glVertex3f(particles[i].position.x, + particles[i].position.y, + particles[i].position.z); + } + glEnd(); + glDisable( GL_POINT_SPRITE_ARB ); + glDisable( GL_TEXTURE_2D ); +} + +void Cloud::simulate (float deltaTime) { + int i; + float verts[3], fadd[3], fval[3]; + for (i = 0; i < count; ++i) { + + // Update position + //particles[i].position += particles[i].velocity*deltaTime; + particles[i].position += particles[i].velocity; + + + // Drag: decay velocity + const float CONSTANT_DAMPING = 1.0; + particles[i].velocity *= (1.f - CONSTANT_DAMPING*deltaTime); + + // Read from field + verts[0] = particles[i].position.x; + verts[1] = particles[i].position.y; + verts[2] = particles[i].position.z; + field_value(fval, &verts[0]); + particles[i].velocity.x += fval[0]; + particles[i].velocity.y += fval[1]; + particles[i].velocity.z += fval[2]; + + // Add back to field + const float FIELD_COUPLE = 0.0000001; + fadd[0] = particles[i].velocity.x*FIELD_COUPLE; + fadd[1] = particles[i].velocity.y*FIELD_COUPLE; + fadd[2] = particles[i].velocity.z*FIELD_COUPLE; + field_add(fadd, &verts[0]); + + if (wrapBounds) { + // wrap around bounds + if (particles[i].position.x > bounds.x) + particles[i].position.x -= bounds.x; + else if (particles[i].position.x < 0.0f) + particles[i].position.x += bounds.x; + + if (particles[i].position.y > bounds.y) + particles[i].position.y -= bounds.y; + else if (particles[i].position.y < 0.0f) + particles[i].position.y += bounds.y; + + if (particles[i].position.z > bounds.z) + particles[i].position.z -= bounds.z; + else if (particles[i].position.z < 0.0f) + particles[i].position.z += bounds.z; + } else { + // Bounce at bounds + if (particles[i].position.x > bounds.x + || particles[i].position.x < 0.f) { + if (particles[i].position.x > bounds.x) 
particles[i].position.x = bounds.x; + else particles[i].position.x = 0.f; + particles[i].velocity.x *= -1; + } + if (particles[i].position.y > bounds.y + || particles[i].position.y < 0.f) { + if (particles[i].position.y > bounds.y) particles[i].position.y = bounds.y; + else particles[i].position.y = 0.f; + particles[i].velocity.y *= -1; + } + if (particles[i].position.z > bounds.z + || particles[i].position.z < 0.f) { + if (particles[i].position.z > bounds.z) particles[i].position.z = bounds.z; + else particles[i].position.z = 0.f; + particles[i].velocity.z *= -1; + } + } + } + } diff --git a/cloud.h b/cloud.h new file mode 100644 index 0000000000..575501be25 --- /dev/null +++ b/cloud.h @@ -0,0 +1,32 @@ +// +// cloud.h +// interface +// +// Created by Philip Rosedale on 11/17/12. +// Copyright (c) 2012 __MyCompanyName__. All rights reserved. +// + +#ifndef interface_cloud_h +#define interface_cloud_h + +#include "field.h" + +class Cloud { +public: + Cloud(int num, + glm::vec3 box, + int wrap); + + void simulate(float deltaTime); + void render(); + +private: + struct Particle { + glm::vec3 position, velocity; + } *particles; + unsigned int count; + glm::vec3 bounds; + bool wrapBounds; +}; + +#endif diff --git a/field.h b/field.h index ca62f91cec..2d0dbac757 100644 --- a/field.h +++ b/field.h @@ -29,15 +29,4 @@ void field_render(); void field_add(float* add, float *loc); void field_simulate(float dt); -class Field { -public: - static void init (); - static int addTo (const glm::vec3 &pos, glm::vec3 &v); - -private: - const static unsigned int fieldSize = 1000; - const static float fieldScale; // defined in cpp – inline const float definitions not allowed in standard C++?! 
(allowed in C++0x) - static glm::vec3 field[fieldSize]; -}; - #endif diff --git a/head.cpp b/head.cpp index bb0e71d010..f2132e788d 100644 --- a/head.cpp +++ b/head.cpp @@ -178,6 +178,7 @@ void Head::render() } glPopMatrix(); + // Mouth glPushMatrix(); glTranslatef(0,-0.3,0.75); diff --git a/interface.xcodeproj/project.pbxproj b/interface.xcodeproj/project.pbxproj index 7b58f146e6..910b2dbd70 100644 --- a/interface.xcodeproj/project.pbxproj +++ b/interface.xcodeproj/project.pbxproj @@ -15,6 +15,7 @@ B6BDADE315F44AB0002A07DF /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = B6BDADDA15F444C9002A07DF /* AudioToolbox.framework */; }; B6BDADE415F44AC7002A07DF /* AudioUnit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = B6BDADDC15F444D3002A07DF /* AudioUnit.framework */; }; B6BDAE4415F6BE53002A07DF /* particle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = B6BDAE4315F6BE53002A07DF /* particle.cpp */; }; + D409B98A165849180099B0B3 /* cloud.cpp in Sources */ = {isa = PBXBuildFile; fileRef = D409B989165849180099B0B3 /* cloud.cpp */; }; D40BDFD513404BA300B0BE1F /* GLUT.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D40BDFD413404BA300B0BE1F /* GLUT.framework */; }; D40BDFD713404BB300B0BE1F /* OpenGL.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D40BDFD613404BB300B0BE1F /* OpenGL.framework */; }; D40FD5FB164AF1C200878184 /* int-texture256-v2.png in CopyFiles */ = {isa = PBXBuildFile; fileRef = D40FD5FA164AF1A700878184 /* int-texture256-v2.png */; }; @@ -66,6 +67,8 @@ B6BDAE4115F6BE4D002A07DF /* particle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = particle.h; sourceTree = ""; }; B6BDAE4315F6BE53002A07DF /* particle.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = particle.cpp; sourceTree = ""; }; C6859E8B029090EE04C91782 /* test_c_plus.1 */ = {isa = PBXFileReference; lastKnownFileType = text.man; path = 
test_c_plus.1; sourceTree = ""; }; + D409B988165849030099B0B3 /* cloud.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cloud.h; sourceTree = ""; }; + D409B989165849180099B0B3 /* cloud.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cloud.cpp; sourceTree = ""; }; D40BDFD413404BA300B0BE1F /* GLUT.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = GLUT.framework; path = /System/Library/Frameworks/GLUT.framework; sourceTree = ""; }; D40BDFD613404BB300B0BE1F /* OpenGL.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = OpenGL.framework; path = /System/Library/Frameworks/OpenGL.framework; sourceTree = ""; }; D40FD5FA164AF1A700878184 /* int-texture256-v2.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "int-texture256-v2.png"; sourceTree = SOURCE_ROOT; }; @@ -134,6 +137,8 @@ isa = PBXGroup; children = ( 08FB7796FE84155DC02AAC07 /* main.cpp */, + D409B988165849030099B0B3 /* cloud.h */, + D409B989165849180099B0B3 /* cloud.cpp */, D4EE3BC015E746E900EE4C89 /* world.h */, B6BDAE4315F6BE53002A07DF /* particle.cpp */, B6BDAE4115F6BE4D002A07DF /* particle.h */, @@ -261,6 +266,7 @@ D4EFE3D0162A2DA000DC5C59 /* hand.cpp in Sources */, F68135561648617D003040E3 /* texture.cpp in Sources */, F681358B1648896D003040E3 /* lodepng.cpp in Sources */, + D409B98A165849180099B0B3 /* cloud.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 6c983b0f1039261b38ae49102b70dda728b186b1..9cf966bff86303aa87ea0c9963c3cc86906f6585 100644 GIT binary patch delta 27342 zcmZ^p2Y3|K*T#RlnOPv)cgoVFg${wxTaY5C^e(7W0THDa8^Z3jVWa3M5hak2Ktk`m z7eP7+p+l%438aA?5WZ)3fvDdfqmMi1yze>Zo_p?@xp!v&_m;acmYI6kGVk(LB{s;u 
zk>L}=-wuB#d{VeST!eoa{#E$2@af?*!e@qm8=e@xAbeqXQh0LsqVUDmQP#263D$S4 z?^`EZ{nn4HpISe+er26u{l@y8^?U1&*16VrYoayDy4bqRy3)GFy573Uy4AYFy4(7b z^=IoL>rrcpHO+e3dd_;$df9r-dc&G+&GA|Dt#_=yS^u=&vp%qyY?7^rE!<|exonC} zx0SG!ww1M2uvM~EwbiiIvemKGw>7dgwKcc3v^{5g-qz06(bmP*-PX(2$M&kNzip6h zh;5kdP1{J@7~6Q;+qU;?AJ}}h4{e{=KC^vkn{NBs_N^_(_Jb|f7H3PaEwnAN`Ig#N z*jC%t**4m?*tXkt+4kD@+YZ`}*pAyy*iPBb+Ai2G*{<3$Y+1HjwmjQy+po4iY>KS{9VH!Q9OWIA9911P9km_x91R^!9nBrB9BmvgIyyMI zIJ!Gta=h&5=jiWv%`w<9%<+a}gkz**tYe(xZO40#$qv8cW5-m-myXfX9p5;&JNDrMysMF ze50LXoMWBioKu`Wr{5`@)11?tGn_M>bDXixxz2gcIA^?bzB9qO!nxA9%DK(C-MPcL z(|Oo=#CgHN!i*JW`PaTRr`F3qL8ysqM|60VZ2%C0J| zs;>I32Cjy#My|%LCa$KgX08`pFRHF~uJ*1Du8yuwuFkGkT(7$Ny80QpMa%nsaQ*0- zyqoT>xwJa zmFLQL6}axZ{&qcZJ#<^$Hn-hf!d=o`%3a!B)m_b9-Ce_7(;exqhA0AXG|+*_l%Ql{ z@BZ6U%oFahdc2&qtoAp07O9Jkvdio@CD=&oa+)&uY&a&j!y%&sNVi z&o0ky&pyw7&mqrY&v8$R=d|aH=Yr?DC)<Kqf zsnk^(D9w}>N?XO(Ug@lKSNbR~D+82)$`IvEWu!7*c~5y?nWTKAe5`z;e5y=UzE-|b z4CPzpM`eMsP)Smfl|{;8Wr?y*S+8tRHY&T7J<48Xzj9hRqnuUFDd&|7%4Ox6a$U(# zvXy(vLlsq0Wwn?Zt~ykws;H`3TrHuN_o)@s2(_YGNsUx%skPN8wXxbnZK^g?pHthY zZPiX{XSIvkRqdm`tiGbYrVdtzsAJWM>bvR(s!#n${Z#!zou+=Reyb*{i`2#H5_PG% zOkJ+7P*EoKGmjcGqjo7*V?yQj25rW*AkR=TB5d6 zTcxe`X=}8#+GcHwwnN*g?b8lw7qp97x^_vstXJ=D=n zdJ)~Om(WYeY?Ix->L7?ck6rfz4}l3 zG5xrHNsR#adWN2@=ja9cuU@<+uh|>smApm0Hm}`V%3Ip|jJJ%pthb!EvbTyi z(p$@0-`m97%G=ueoVSCwqqmc{o42>OuXl)dsP}d6DDPkX_O!<0UBU%n^N#LJC&gk%Q|Bo{v$k`v!@}nqY=D?~$!r$``h!1~Xm=C6L zlVaQ=C;S8Q!lw`?%!m8~h54|*YDBB`ectRdGwbYQ+j{s%;Zyz53E>}ye-i$wFdq@- zqr!YFA^fxO&%?hE=HtSgBFw2TwH-2i!su~^KlYWb-vs;^!hE98#;ovIfAqrenDFnz zXNUg~{$uzYVNMg~lfryTm`@Ay8DT!VFnn(KJSqqNOsX)S6Jc}w{Q`eRt>PDpgfI0s zTjAQZ#x>7xGFwNkG2&X9Z6fT)REqIB!>nVB=_6{Gt>cYWCCZzvZyWRaMVPJc8Ql`1 zOdlBcYF05@eMY~LmCV);jeaA&#*ZT%X6q-$l96@H*3XRek=4!CFO8JQDrW0+LnPKW zeQmrks-xNZt+9So1G6>8$Q|{p+4_SKIl5*MYplP-09PgB%7PNcd!rSzHO`nmI?`-S zFt&|uY_={m{umu)wk|TNjfpT@ml~bNR5e>y7@fzIc6908xo!8y0k1X+7FM;a3)re^ z#4q$3nPZBZts4!~*hsT=ixD-prrEmP=s&iu*}BWPoK(fM*SMTi^4~W08&SzMOa~3e 
zxN>Ie5hH3`CA0Oo(R*ASv-O1WMc{v@jBNqoSz~31D3jG_UaykbddY|$U&U;_YFw^Y z+BmkzX38+8kFRR78kg!-HCt~PSH@R2Tk{P2gb1_sw&7n~*Z8V_O|$h6MH#N;l?_+pfRpIPbxk&7bmK^q&4_JW-E3=N#J}6fWHU@nYME?CRFmpv z+l$6C{VSVo9gN#eN*VK-lr`Cm9ZjN4Hlv_PGqbIyv9n1jqul$d$!7Fx8nE+a(|Q3& z)B0xHKx1G;MYCv(RA1lQ$wV$o^zRP$Da6J{80(BE!Ai>Eu{VHqi0MuqeYt+&9*bfppV;`ZRd@p zA3txlr5jNtUohLQ7S6(V6qvj+O{^^GL5+Qk;dImRI}}-;rg_-*_Lb6 z=}_NT`MhSf6&UkAea>wA#Tffpq%mMygz5L-=(={$jNwytv+XZKOl@Pf-8ag&3K-Um zds8FKwui>p&*~WSwwE=Tjj^9~G27)wdWVt!S#`6$s4?^4<2Wov`_FZ=-D<4d@tkp_ zU30VDZHTGWjANe{F{0b6CcDwCeJ7LMnAg6w*@QOH464WW9n<{{Ca@dKENpd zO&7EMHKSFZkmsY_HM4!FvF)2@&Gt8*gkRezYqq~-bTwWv+eaC3?L%Ru8&0!*>?1E# z&Gre#%!AJxD|>b^*}v)airGHdSkW|^x?BWhe3qk2ksiOu#c{^+>4CpSa; zc6*LLdW9>}=#tXYXV13ZwBHh8aUyKK2ul=U3q@G62wN<|mWr_DB5b7yTP?!Yim>$} zY@-OmMX&1MA#`2c1DDq6JZxb zSh@(iEW)meu~ejer8Q7&*3ZESQ)t7*(gFIH~wm@$Ruh!8rV z5M9Msb+$!}!)~&d2^p*&f-ES6)Cw5vYQ(?Z)F_cwPN@?DBozW07_~3eF(#)e#T$oo ziwbqo0dkXIG-+vGr9}v^q!932kX-wuQvCUlZdswOeSlog=$2O3`0Aup=@bI2CXW9V6<`ij7Rw}1lUyw_|&+6rlazCNWP~~KF!EGUni!&)#O$kGeg>+3bo%F ztIl>;VnXu$h4LSRtgbGT95dHxvZ;=_A;^J3$b4hOxn9bGko-`ge39|D5@C$-xQ(Lc zODRi3`Xhz5diZHatm$ap1g1*&G6-6auyzdFP)izAGd@Q7HfE z$>bRyU2qrwIRrRa2sj)71WKc(aprZ(u+B>*( zeoL>S{1pORD+K&))V}mw2`3>%#uJ583NYISnFVKy8b>bKl_DWfW+BLGsM4$?YSIomeB|0DVlV1v94Pv%CO`TNd zqi*tBp(Z-eEGmaQ{j;$-AN7?#3Zc(ED%;W#6DJHG@z1y(^_jm4vF(j+7wZ^hFUZEN ztJd0`oZbD=iO$Z>F3zsbZX)cS2)i%B{uW^m5}iGqJ&AO_B*GpFiQtCTY&h~K_2~23 z?=9af`SPTwn&s*=h-y5!XrD<9>O|En7d83Goz6MP`FgHJDaE+M&t98X}QJ8;ZiW6Y2a>NX3f@sYK9n$v}3DH*S=#y}-HnsSz!4 zE)9*ow2+>8H2SE_(!Mp$jZfula&9Klxm8GIg;XxcDIXZcYk4RB%IXuChlcg)OztzO zVcoj_9>8wr{-+M$XXgPTori=JA*6~z3Y=F|GLB_dubtvN6(If3sXOgFL!|SZkg5o& zYA}#$M%AoJwJ$p}o*MiOXJ%-?HH1|2(SSe5D(%a2{_<4Lug>2>oLWMv9pXf^THo6~ zc+B+S!2#E+U;iJ~cHd=snkkpr6&5m8M@V%GO*y0wyUgfgtjbnuhr68q=tP&*WpmkG z4k6VSQUf716jGx^m&@f2u^S7i$)n%{vpg`nMD5b9@=x_z!4(nWH4{?wBVKZL>Dtv@ zwFACtxN5p0UA2VttdLp=silxwCAy+qb%=D;6H;p-Js0G(39gHgH>=ie?rI(2J?m=W zYUyevr00e7f{rEnEBZTylka`DseS(Ka 
z)B0ZP(|1zcI*sZE8biZIfi+dPZliydq-&h(oq*y0Rb8%kUGEX;nk1xGiV5k}pyj^C zwwy{n;rjHc(M)xHMx^TtA@vv1fFNgJpj66K8C7IJ-%0iB)eW?%hK&NzG>B^a?-lNv z>H02^p08csxD3~~LV8U|gM~CiNJA4{vs^Jmx@HUM^=bniS-GVAL+CR{NSO>#2UST{nqzLi#A^=VRl1LB%My$!&RBHrz$rMMEp-Qz10_rMrwf!XKUJF6%Dm zF7K`&q-jE$E~FVknwjXX=&lqBQsmW1b%Hmos0IxtzdWgK{ivGd>ensoh3@X|-cOr6cOUo5M7m!UQk*C%#Rm<~ zH`@PFqxK+o;CAhP%{|yX#646^H4I2bned9p}Lr>$H&0JT|pHGIeoG_N>suQJp&fFjdM^ z&L5rVDeZa2Q^r$PNavah>4K2bg>*U5Q{Gd-6XB^Sq^m;87E->Detoi&_EZV(FQmeK zl&6NjgwgWuR}~5|wL+M}t(2#3VAmjwD+9|I+wToFw*DDjyg>+5xTo?o34odgcX;7{ z71yFefWpm{r)AJnS7XHA(WRa|WqY3UwDm{Fd)f%;TD<3ZAzhDgn@tsqxAzZ-_jC|a zM(A}0!+LpMGScssmwS8qFxp7JTQeublcJ9?yNbYY-l zgj5hZJ-gugWK`=$jas#7 z+_piRR&@#s?S$uKApL1VcEo#5#q0<(l{LoPD_Z7UAphru>^jL!&qYr<#mHfZv!CaR z=c;GncNBVgF;9l)h9}e1#gp}2uBlonk4sMFP2Td{^xS&2bB}hTh75hT z>!1m5%=hHP)G?XV@_C;80WQ5=o3e&&r zUpZ($kU`~nPlhK`c~NN>(=E)T_VWn2ip!izF{OjjQR&pXbB|g>22B_w&=5+;q=%Wp z-LEQvuBG%9a?N<9zmOvXx(J^VXj{r)A=mm(^vW>LRnLK7@U?@%2WI5S9E}Jh$5!Zh zv@*8PdSH$URTGtW0;;!#TsK~MSIG7HwQJM1^PrJKdrTNNJjO4X^iq=*zdw4uGDYzT zxq*-y&R2x;p^zI1xpB-Y$@E&K&y{KZ=mg~p^ zN`jIo@4IiLT)AGu0rm%P)RYV$_XXSq?%MAclY-fat|T*^7r@mkGWFJR6gcnG1F(hixgAR zl}mw=nlOCi(7-dwn-h99pD5&>p~AYNTn*LG(D}+0Mh|egdX644=)H1-CX}n+v_X?{ zT|2pMhN^s*lId?2kJBGkn8#bnFaGExB}d6s@;ru8pxjpO2>E3pzbfQ@LVDOr$OD8t zNXUbelwXzKl;4#qGg4dw9*2HmRKLdb!Bj0b1hIaGJ(`n3S8@JPOx zs)yu50v5ZKR!ga6{Eg?UrPXJI{JM~b%~#8+<%Il(kcY=a+f7wVt5wwM0YudR;;kT} zhFVj|BZNFMrjEl@p;TS9K>$!M02mzvG*lZ2d5n<9h5*ggmj3~?Qdbq?l0-Q)ypcb-;fB1Jyx7 zo-E`k5iLL58LYl1KOW7k`ntc_Vs#k3)Zyxzp4s*t_H=cWeYZMB$bNqnA%7_3kA-Xv zzLiTC@~45fa`Ibs#&g4{d$upKe}0{Yy1P^13tcavX0aZ>drtI z8`VwfW_63YRo$j;S9b_`mXKqF{JoH83;72j|0v`+LXJ(QG8NT5>R$CHbsrVgpZ)y; ze&-5#o{$#^i_`BCmXa~OJf?JCntI9~9UtCDn0Lghr-dB%c*4*5o6T3xs~3bEFXV*B zq1e<*>ec^#IHg_>d^jb~k7)URxqp9VrDmzO{vSFwh)#@XIl3G}|M^vxuHIJf2F7_u z{YCv%{Z0K{{X_jz{Y%ITg`6bhWFapS@?s$`5%N+YFH2VMsrS{t)d%WB4b7yPg}hwI z8-=`2$oqwSQOH+?oGUCvH*``>vZ;e+*ED~0lIGBynoDzQ9!=3yA+HefN+GWj@@gTk z5%O9guS=pLky>#oa!4yhq~8^s$n}1gzoC#f2+0%^tC{kKX_fuW9w$tz=5JQ`sG-%= 
z>IYm!YPGc5T9j5ttE<%$@+Ki~7V;J$Zx!-3A#WG*jwC7*89q#FtToY^YR&xp0%-7m8`XARW(_?xkA^#L&bkVxTj4f_b#q$E^3Os(AmoEWJ|yJBLOvqoqe4C=P#o`S@AZB15YuDmgaqwf z?R`e~>fEEp&Nx^ELX^k5*ADUc5T|m#FjDTxNm4#feRSV;)CM<>90==dl`LEg0qxAaz=mfovURSRt zEX9N+Tv)6LMCuLoM!}TZgvBwy6%$e3^g)^CdW$F1uD28x`(vNF{+!+>P#l%Iy{NZ; z(#7=-!s2?g9`w%sW(j&1y{q0$Slq&*3XAtK-KO`{d;hyR>wWZ>g~cN*N=&^9rpl%I z>x2FScugNHESj+BF%v47%9naWf9pSh5&B4BDK0D}Vpdi#Ro2Jpll;-i`gnbUK2d*L ze@A~;e@}luCclD7D=91$l8MwOQ<2KLUl;m^dhnFn5)o4>!nDCXU7zWXj@M@hOT~Eo zYhkGrb1lMDx5amQOh7zKSSrWs-wR8Xo}C5_A5}Q2IeMJGae^MJ&(-G%OI2a1CM?wx z^mr=k3Bpn%re{Ud@K+Z32gK`(g{9_Wo~|#`*ZHH9^yT^reWkuiU#+jv*9uFdu+$Qk z+QJegEOmsXuCUZg(%0)7^o`nm?Y_Q6-zqHig{6V8v=Z{)LjF6(TgkNAy-(k-|4cFc zfPPRvq#xFg#GI>S>J{ExSQ-gSLt$wd)1lk-W|@{nq0rv7*I7K63TE7-2H$Ve|F;m>E?} zWx{UjcVbpnF-4kwiAk?wYSjIA{hq(^|9=0i-`D>pQhz8ct%W7{KF-o6xUZPjZ_&A@ zyyqI=s%#WEOUD>hO=|J1;1ikU+0YZ2l^fo0|A2U}HD*UuQ^iQ9SMx_N@VdNiug9x+ zRbgo_EFFZUqp)-mmd*>bxn8fgxJmVv6!MiAS2a_=q7}Rq{n7E>h?t4hOtp(v^VSGl zRgYO(%~bjMC~w^-E5}<;Sb9EMIo^i;X7jy`yp4sWm$3ACTqZVeGw-wiE_rVYZ_AkC z)lF5jw%!;2qiW}EAJenCsZ#GQ-md>@0p1?oUWI;N5|)0C{k|OV`-=BfVd*a{Pd4s0 zZ-4K9)}r?{@8Fo@)lCuF8{Yq{LhlIg$e5xvOcBnp-f3Q)llxrY>H8%)%O`SA0+E*87q7<3Qa~Sy)C0%dk+@`X7aP!~2DQ zK!W#6?^oVw!t#c&3>TI+pH%MSat#&ozf03QD_ELu{qNFj_oOyYdH)e5Lrk3m|9So1 zI6r~%9W5-QLK(Z^O$b!S1YsE(EMD(IZ_)r)m6(|xQ;8T?q{&fqzBf6raV-?7mc`yB z16&bLFyUeIy-Q=dMw+6-R(Mx#m=S4OQq4C6<_v>56JgG0Fz0ia^9{_I2Xo?K&O(^8 z2<9w>IjdmKT9~r|=In$y7hp~X%*li~H(^dL%y|H@CWsA#SPR4!g;+PlmVwxE5L*Re zt3zxg#72QHwh_cOh1lj0+Y(}5fY{CuJ04=0ogli2ViTmVmhpU~XRqF!w{4J0Irmhq=dL zZYsp}hPYQCt{=n=gt);F_d3K4hqw_CHyYx`LEJ=$dl%v+LEIFG6AJAYlR| z%!Gv5knkfUY=DHFkgyvPZbHH@knkHM>X29#63atk3rKts65E3>aRMYxhD0AE?uNue zkaz?V|AGZ(SRlcICa|D2ENBA@hQWd{uwWc4_yHCqz=8#^-~cR0g#~G_Fbo#jV4(vR zHh_iC!orrY@D*4%7#0qNg&)JhY4}(;0~Riag&SbuCRlhH7G8#hS0Tv`NjfAIhoopo zY70p(K+>C#G#-*BLegAFN`j;tH_XR}mIJ3yWWX#qD76SXlf% zEdBr%&xOTFuy_$HJ^+hTVR0HPegI2~z>;FHq%16{3QMZPl9yr0Yp`SpEcq0c%z!0d z!;&?yWGgJ$4ognMlFP8b7Pby!*pmNtT=O<-wPSo$(7eHE5Ygr!qpsUMam z!_rl-bPX&$3`8*7d3jh~9hTRG6_;V9 
z8&;Ntm8D^2YgpLPE1-39Rl9tB1kr z;jnrJto|NW{{XAE!s?%3^?q1mfi+H8wkg`GHh_b1{ZAT4;zNThT*Vb9cS^!)Bg01&pTPxVs9=3IaZPQ@eEZFuvY&!wlF2J^Q z*j^L1H-PPpVEbg){wZw#47MMI?WbY;S=dnpc0|FBy0Bvu?05%uya&D=t6|3$*s%?E z6u^$ZV8=b!*${TNfSs*i=f|*f8tj|_JNLuRvP>V9z4h zvjq110(@?EMk;UWL84U~ew`)ChiR2|u-lpT2~jzJ;G= z!B6Mlr)%(22JEZvgMH0mUkljhhkc*JzAs_lQP_7H_ML_O)nI>J*k2#^e*pVGf&Eiq z|6$mF684{lpCjSthVXM^_<0KaJQaTa9DY6kKc~XaX>gzz9Pq#a6%O=;14H3}Zx|fd z2nTk-fjw~G9vqb5pal+ghJ(G~;LC9EJ2*HO4#vU33^hGQl;77oX3aI7;N>kY?VhGPkE zY$+UD4#)1o@h~_pDuJ)LxL<2U1^!)c%k<2vUbY z>M%%s6H-S)>KI5J52U)s-0i^mM^+QPg1X4eP)Gr}*I;4ILsoz3s45a=5sj-k6 z2dN2=x)6M+iy(C=q^^L})sVUlQa3{C7D(L=skIq0a1*vBt z^#-J7LFz3?&4tw8A@xrH?g~hEuoTR34lv zfYXiPw686kegRImhtnP5^tW(29!@8~>4k7Q8BPblf5YjAaK;R0BsgP-Gfp_u6V42R zGlSvG>u}}`I5QT`jE6Iu;mm$Ga{$g9hBHUu%xO4t7S5J{vz5Vjwkn*h0cRuOY(qHP z7|xD{vme0ODR5T6*^l7tS8#SZoK1tXSK#b5IC}%mX2IFpaP}8CR~^nZf^$vaTr`|} z7S6o@=i0%!Z{XZKI2RA+65-rJIJW{H=T^bFU*Wt7&WFKy3!ES{Vh0}21EL?QL#nN!G3|uS^7bDwK%wz0M{16wMB4kDO}qR*Ye=neYjo`u1CW4MsU3` zTyF)}JHz#^aQ$Vt-WRS9fa|Zp^`UUxHwvzgh3gaG`a5v_eYpM|T;B@U55o03aQz-+ z$dF-!j1rJh3Nk7}Mit1Y4jGY<5d|4dAR`(wT0ll?$Y={0ogw2@$aozx#zV$L$e02d z0x~`ZU&d6(_yRHv$e0Bgvms*+WXyw%WXMD4l*u6#%0L31{pb! zkq;SnAmca4_!Dj@aHA>QcoA-lfgA6^jSu0*7jPp6Zp?-o^YL+G0o+K28%yBEa=5Vp zZfu4d+u+7dxUmOr9EKYw;l@?CaUE{t!i@sB@eADe9d7&unP$k8A+soCS|QT`nO?{& z2bt9%vms+$B;D@vc7<~$X)~4TOs==$WDdqG|0XP*_R>v8f4#q>}<%s1KGbp_Meb_53(P? 
z%@T04CEV-`H{XVvKDhZ6+?)kB6X50oxVao|u7aCu;pPUoxfyQmft&l_=79j=Fx)%_ zH*dl%H{2=&Nj%|2|0TpXCLGogPb#va|LqpAg2Iw z{(_wQkn<3Hxn{_fA=d%9Zpc+3*9*BNAvXeYqae2_1_AvxwjzqSI9F%o*nW^LSAXetAvlds*qO$@@hd|9ms16 zdCeiOCFDH^dCx=MAjta+@@7HacF5Zgd1;WB4tcjAFBkHDhrGWa?>^)`gnTpPTOr>8 z`EJNpA>Rx6nIgpYR z00pO@;2IQUKtUc9+=hZ*q2LcFxC^(#;I;*B7lYe2xb1}7#o=~ExE%?%o5JmAxZM_R zzX-QG!0pa(yBj`kzXG@W!R>)?dobL79c~M_oea0v!|imqoe8&pg*zs=69#u|aK{OE zJa9*YJH_EndAL&%?o@$0)!|Mg+<6i1jD|at;m!iMvmEYhhC6%U&T+UC*f!lc4|mf4 zuY~gqbAzzb?m43}!;U=C6nUC7D$`VkVhpCoun;iKm}YueFQzV`I3^T>z@hipMvtU zP<{c*FGKlNDE|h^??CzYP<|iE{|n_&80&$t9E>f1u>)XiF^v5L#?FATGhys}7`q6@ zE`_lxVC-rbyAj52g0Wj+><$>a8^#`nv1chU_8N@63uEuW*het-35@*>#y*3w)iCx0 zjFn-m3S(E1_}?RIZ20%}}`wDtAKV9;n<8mB*p-0#yD7D(^w%eW-j4 zmA^veA5i%mDqlin6e<;{j6-D-Ds`wfLA4F438?0wT7+t86R2(m)h(gA4OF*<>dsKz z4XS%UbuXyy3)MrRdK^@jLUlP*&xPt`P`whW*Fg1psNMk8+n{P=yFhJss4aln0Z>~6wG*Ir64XwI+L=&08)`p?+WAnsf)cf>p>`eA zegU-`p>_|{?t|KcQ2TGFJq@*IVSGmz-y6pFh4KAi{6HB0PZ(be<6ppd1;%SIJ^|xX zur>^9n_%saVeMY9b{|-~AFMq9)-HmzN5IjE5ln0h z6Pv=s<}k4{Oe}(l#V~O+OdJOj$HT-pOg6w|GfWm>au!T3fXO3aatTZ>g~?N4@(h?f z3ntHj$yG2}x(g=nfyw(}@*$Xf2_|2G$=6}>O_+QOrsl)celT?eOdSbR$H3Ho!PE&b zbuvty0aItf)Y&lgbC|jordGhzN|;&&Q#ZrZM=bGE7xrYAuwereGSFPQdga zOb^3!0jAf7=^w%Lb}+pIOz#BKJHzz*F#R!1e+ARuKpm(DpdN<$dQgu*Jqqcc{-wgFzp?(L{ z?}GZDp}rdGUqXFdi0_aAWEL_TnS*SO9FLriEJI4jImna9tH|rfo9miyA$2r@wxF$O zJ30^D7u^p%5Iq>Z3cU%v1-%`;6a5qVIr;_qHToU81|7qOR&40Oh6FaGupx^Ln_alon#ce%NpzHXMQthhxL>*l-RuT#F4Wu;B)5xD6W~!iLAN;U#Q%6C3`D4XZIA z20Ae?fPooQHu6F>nb6F2}%C7@GuN6!r)>Io{Yhz82lLq&&A;R7`zCBmtyb=3|@`F8!@;FgSTMt_7Vo4#^AFU zd=7&zV(^z3d<}!|V({-6{3iy#z~EOHEMssIgLMowV5k{G-55$^Xc$AAU}z48w!qNV z7@CWrf5*_F7&;C^$7AS33@yRXDHtl9hN0^)bQ6Yd!O-m(x(h?kVdw=6y@;V-V(2vt zy@8=WVCat+`Vd3^#L#LCeTAWKG4un5$`}^I0St#RydH)l7>;5%iQ!=kZ-U`D7~Tp? 
z;U8gmI}GoL;hizO8-@?Z@TnMHh2c9e{0N30!|<~hegVTTWB64JzlGtqG5k9WzlV)s zY|LQeme{yGHvSkJ=VRlZ*tici?uU&BV&hTRcnmh4NXf<}*mw#yo`#KQW8=@U@qBE& z2pb>8#!s;+f=zL3>cplbHWjdG1e<1K(;RHt0-LtRrtPt5M{L>|n|8ydeX(hOY+8g( zCt%Y_*mNp3oqWSg&9QkaY@UbByJGY1*t`In_r~UZv3WmiUWU!LV)L)Dc?_GUG17>UQWPUy z80p2x07i-!*$5-sW8}venU9eL7}*;m`(k7fMvlP95{#UJk<&1;Y#j(A%Q12uMlQt2 z3XH78$SRE7f|1)Xau-JK#mEB~c?KiTVJY$=Mqb896=wu-Mi^(ThclXS#-2FiP@Hi% z&iGH9u^4CEiZdR<84u%($8g3IIAaW38n7jZEsfZ+9=6QKmV>b65NtUdTNYuX zqmvk|W2^~dtr+XWSQ=xcL5$@vR>asw7@Li;IT)LZv3VHV6JvX0>}ZS~hq2=^b|S`> zVC)o(orbYx>p&P=jV7dDwOV zwq1;Emtxx$*mfVbJ%eq3!M1U1&tdy6*nT9o-;M1rWBaeL{nyz3TWo(9+dszkPqF<6 zY%gPb72DTh`&0=#z>WZRgt22i?C8Oc1a|acM+Q3vuwxiI3fM7%9UEfDJnT3KJC4GR z71(hfc6^8(Ut`Bwj0Z3t!+0CU;~4M4crV8LFg}d&JjOS`cxe{KH^KOJ7~dY_^Dw?E z#^+;vZ;UU*_yHJSgz*zFeiFt{!T4zyUxx8>Fn%t^FTnVf7+;0)n=$?{#vjA@lNf&n z+joVgZfPGV;>cD7(=D|U8ZXD4>{U}q9L)7ZHocFw}iO|Ww_?A!u7x1wa{ zkFaw)>^urPFT&3Mhn+8D=U=c(?8;%+EbN+%U7KLnHrO>6ySBxy-LY#A>^c;?{sX&? zz^sahM9=lG&t|i#D9J|iNt_!g1V(hvMyRO8pYf9L4J$5~hU7uoi2)l=|dr#~> z3A=xR-M3)(z1aOEc0YsNuVDAv*!_F#{tI@0g59gJ`z!1&V^0(IM6stIdp5wHJ+bFd z>^U5JmSE2**mD~8EW@5s344}f&w1E$A@*E?J(pw8t=Mx1_S}s<_hHY2*z+*DahZ?X4}*!v;&euTZBVDD<|{Ste>!QSt&cM5y!mos?=kT{CjJ)_f5yZ=F!4DizQn{gnD`!(4VWxoatll@z~mvAJPebI zFu53$M`QB15+={Y@^VbxiOEMX`8Xz@!sN4>&^n0f|N&tvK(Oud4s zcQExXrv8AbKVs@nnEDH*zQokmm>R=W4O0`Cn#Q!4{x?j|z;q1L?U`yI?1q{7nArm} z3ovsOW`2s9Q!sNHW|m>5gqh`-xdbzpVdh%Qtia4l%&fx9eVBOwGml{Aam+l0ndjH} zUc$^Pn0X&FA7kb#%zT5HHJBO0Obs(rm<6)|%(h~-2eZSN&0}^0%+A8>CYap_M148nee@_6*FPh1qj3doE@#!0g3X%3g-qD=~WwX79l4-I%=(vkzeQA^}ngkHr3Cu>ZfX{{-wm8T(ga|J~Ss zFZMrx{r`>qk5aP#3G9Cs``^I+Ut$06u>U>m{{Z_x#QsmQ|L@rU8TPNnfesuP!GRrc zV0RqY0|)lPfqijc5e^)I150q=6dX7W2bSSL2?v(rz`(th~=8nSL$(UP;xl=KB2IkJf+&P%L6mwTlGIurRuEX34%&o-S z1DJafb1!1hpTm3+^BZ7(7UpMTekaWDgZYJ+ zKLGOwWBxGAFT(tBm_Hu#Czmk46!WKI{yfZIfcZ-?e+A~R#{3G*uf+T+%-@gs$1(p) z%)g5HH!=Tf%>Nei?_vG}%zuda&oKWj<|i;eh52dB*Rc@6LJJmJvCx5qPArsqu+Wc% zK`a!ous#+x!ooIKn2UuSu&@&rcEQ3PSl9~-`(oiREF6u6rC9hG7M87pVPQEIF2KUY 
zShx%eH)G)*EIfmSKVacoEC#R`#$pqeip^MT!QwC$cfsO9EFOZzW3YHK7MEi2R4kr> z#WS&ZH5RYK;xDjxBNlJM;w@Ob7mN2}@lh;3fyJk>_#75r!{QrQd<%>3Vetbjeu%}t zVevCc7FT0&4HoM-B8~)cq!CBbIFiMY0UQ~|kphm)#*s~NWOE$Z3P*Opk)Pnm5*%5D qBX{A*Z*b&Y9I4{^>*4w>xPHgKUP=TT{-vFd{7a$f{}--5^Zx*{y6>3) delta 27352 zcmZ^p2Y3|K*T#RlnOPv)W_QZcq<07*^b)FAK$;W*ktPHLX;KtK*uD0SGHOH#kV5Fa z_ugv~dWQs(KpNJ+3i3U>0iu4I@I1_%^S2=49taoVpM-xJ{#p3v;WNU`@Wk+>@cH4%;S0hShA*;@wvV$o- zx4&(F&;FtP6Z_}(FYRC1W9{GBs{MxjrajAk$DZf6-?jf_|JDAx{k{W7n8W5M;cz(I4zELZlysDF zlyg*cRB=>y)N({P>Ny%X8atXfq8+Ur?Ho@zo^?Fu=;CHr`Sm;>d@Gp0)a;$Z1 zaBOyLbL@2NaqM>-avXJ>aHKj;JI*;SI<7dbJ2D)Zj@yo0M}gx<$1jfG9QPa#oK~mo zEbg>BT~3cvbNZa6oliI`I4e7=IcqxWI3u0)osFDLoh_WLoNb-$ozFNsIy*bNIeR!? zaQ1b+#bE5MN=bO%H&VchB=ljl&oS!;pIA=P)c7E&p-Z{q^ z?@V$oa4vQ(bFOr*b#8EOap|s7uClHQuF9_Ju3E0Tt|(U{S5sHCtF`M%*VC?!uFkIK zT|HfWTzy^rT?1S%yN0-iyIygPa*cMq=9=i5?3(KGyWV!a@A}B)|IGD;>nm5R>wDK6 zSAuK4YmsZIYo%+AYlCaEYrAWgYoF_&>!|C5E6sJrb-{Jnb={TW%5vRt<-2}x{o?w~ zb>EF!au;*k-7dG+t-DLP%epJLE4!<^Yq{&Xquh<$P2KI?Pr09VKjVJZ-ND__{hYg# zyN|oC`$cy@_eb~Z_?#^}Rx%1tBxbL~|Ds{tC>xc%%06YkazIH_ zPAR9AGs+DmUCB^xDtDD1lpmF!Ja{Y~tHEh|?>E?OK)8G7{gwy|~XR7Bd&os|;kKYsU2+wDp&pk6dUwCGF z;yiOab3IEuOFhdx%RQStTRdAm+dPLnhdoC;M?L2}=RFrZw>+7iEYDA#|9SrKJn&k* zHg5^9)2nz5ug_b?ThUv|TiIK~Th|-uZQyO{ZRTz6ZR2h4?cnX`ea`#5x4ZWRzqg%5!2 zJH30ohrP$VY2H)bbKWc7>)uT7ZEu11C-3jx`)Zh4Om(VW)lf^T71WAqb+v|CN3E|m zQd_9eYD@Jg^=b7P^;xxp>hGcURC}o}sQuO9>MQC9b)@>LI!Ya_zM)Q5r>JkL@2c;q z@2elFv()d@@739AoH|EMQ0J@3>H>A4x>Vh+?pF7x2h@Y=QT3RbqNb{6)U)bE^^%&Q z-c)a?nQE3=px#w~P=8eaQ17Yt)ju_xR!l3dDVj&~`ZZN6qm|X3(5hR zYpu1@p4K{OowaV-P%TCqrVZC#(MD(^wO6%K+Gy=H?LF;%?E~#Y?IZ1D?Gx=&?KACj z?JMnT?K>?&OVpCI`C78JTw9^-(e`TlwEfxv?Vxr@JFFeiE^Ak`tJ*c~x^_cL*Zdh; zrgmGqr`^~7)E?;Q7Tv0c>5^Vdx9d*btE+lVy_Q~EucJrkb@fQSo*t#w*Bj`K_2zm@ zy{-PV-bL@KchjHO`|2_JFnzfGiatUgtxwjc=>c8nZ|m>q@9Q7wrv8=wwd&Eo(G&DU zJxQOhC+kb~Wqy5?zFOa?Z`F_MC-jqgik_;c>8JG5`WgL_epye~GxXc~9sREUgZ`ub zoBqJC8xF&1=!RjGH7XdjjM_#WBf@B4G%=bQt&H|YC!@2`#dyK!ZS*k)8H0_Njn|Dg 
zjLF6n<4t3#@s=^om~Omlyk~r3d}@4S_+yQ4jaf#VvBFqstTI*`YmBwVI%B=D!PsT& zHVzs`jgv;2amF}nTr{p2H;h}x55|wiPsY#2FUGINAI1Y8K9|q!Q+ytu*QfeQ`AYjL z_$vCU`|9`_`WpEf`&#*0``Y;0`#So%`g;5N`1<+=`(E}9@xAJsp>9~XRi9*SmXM>fu-ucG)fST9Y^?(@L_?b3E@M-V}y0Lu@LA#Cg?}%sX~KF+SWgS<8DTvutml%$+ zr<#A%scyATH~WvSYPAcq|7f4NGSO{$*W8&{-|~TZWppj8{bMsFqPo@onOU!H1FQWD zb6Qeoi)kK7YHaz&%pKFpYM*6BjICp}&o;-+uWy-aem*wBYELk?jcsbR&o>>(QI>^f ztK=${C1#g#HLUjKW|whg%|*#h%PMnjz3Nu`TJu_RBg+QU@>+z|zS*qzS{n>paM z`d0f+b4FA(t9_4oVnKDwe)DQnEvx;I=^9_bYCme$8(-CGKVgnqSl^Or&RtmFa@yP$ z6rD3yma1p9Uo=}asA9EWF`r*l!*boc)}XApsDa&T&oDoqP{V4^G_N(NZnfVwuTQ9D zwdb17iIuJP0`tX&QC9np=4TCSTkXG?n;X`&+J7^*O>AJb-!n@$DsQzvFk3WgYIRu6 z&Xa0e9kMxNNgYdZ^F*UcR)^iZwxof@Wj>fx(dzJ+9UE7$Iy7@s$R+o3Z`XQT}x%N-m==3YUbD`m937NrdU?ZQpeoVB*N;5G;cSlYIW2% zJxyy_9gWPEO{1)irslY&b*zpS=A5QAt&Ub^{NyH9M_V(0d0k6;GpboltK%8-i2>ED zj*jMQD_U4On>(9DS{>cYyUm(g9X-sQ&B~Y+-qfs)7t8_8>sTFq%}<-xw>n-jcQ$We zbqq8IRjy)nylj?fQP1j#F*~=YZ*{z4iWUv6j#teYQ)^lsW6Tq)8d_d6^QT5x9TUxt z(RHnkH_SJqBdv}%%_Y&bt&VBt%D0}iIs)d(w=~l=t)bQNjybAj1FPeGGrnbQtK%ax zzNKcKSX0vSsrg6CDptn~vtp~tR>w@U-1K@@$Jb`J>9wqmZ_O{)*0+3bu9#lO>X>7$ zm|otzH{EG<#G4h?)wU#=16xN}9ScnVjPhoiwz|1uU1`f=^K$E+R>v~4!usbdD@}i! 
zURK8%GwQiQM1E!6>R4}9Y}?!F*ktfM^~`bYC38T#3RcH% zGtjQJ)v?dq)$UoVA7j%#Mi_nxyl(#>6m%A4Ju3pYQ1%5H9d z&uMks3W);mx=m%PX2~{{_uE+=IcC&z4b7EL>sCj;Irsf1t&She*FKCen>|y>>iF60 z)-7m6H>Yp&S^j5=587KDf0z~977CT^RV;s+uYDM0PI?CkDLvG?%=`~) zS)IkqdOM!BgqzCy5vKB7F>`wdm(}Ssk9UZ+Iu%oVP}4l|Q86>Rqh@ug=EUgKx=JZdCS)H}b6`h7zopsIpPrF&2QD)oEp0_$1nr%O;X}0NH!p!Sj-s)@; zGIX-1W=8IHTAG{Py1Zz0wlwoUt#4lGQqJmZWB%Etht>I{*=Ao4%hRTRMrW(DgL!;L zAFH#I*{xegtFxA4XLr-mtLDrDQI=joCo>QD%>G|`tj<2>s-E@Du3x&$ zm0!B7&VJ_3F9%wk1I&suyIP%t&9;4Nnkx@hvnT9BFnlU$i<$ zo4XFRvy3a$^{ioaPB823C?5(uuBX@PeBE5tvy0U^#kBP5YjwV5uINhzndjtsH9J@cDRR_A+Wn|<}oz&91F&JWFkZyK6^zEINY{KPB>DnB=OwJUE9>Z6S%Q?HM^dC+OonF-&HOhTw(0&a;^@vh-h4hJ#W(esUA$=#LIYLShQnHX1 z3u(2G)(dH~khTkHw~+RWD$=1~ss=Hbmt8)F21<*tc>x|~*HtD^YM|28jJjOMte$45 z!%wSf+d{z7BEVB-z0>VVK3gbXUL@}nBsK|oTz^_u zyA}dg76E#gfy)ielf&%hj5M42N>OfJD1@#y^Ul;aUpZ5$c)voy+K^!HnM%b66$;jy zd1rbSk12fFXy%=XGN+!cTzq7qU~>q5?rbG<&e>v)twoL}n9g(0tFITLwin6YG+#T{ zL!DMA-&rIV=7VdM&2e6jnP0&co9ncAG}pU@sNF@V56!#hqhj+NLG34n+Py{E8D_V% z*57l1JTBs6SPtKVa579c8Y$dqADj)!!`FMHIKLfyF{-D%TvvA#Lr zvRge@sJl?4yJQ9~cT=wx$}bhk)6Ki*>zn!&Psv+_x+_Jx+rgwY4i(?XE1H^H2)I@R zxNClSWmw6d3gtJ7q&f4i!yB?+n9 z9y;!avqM1A4tXj!su1 zo4Z>FqLbV$+|llq?p7k~7ZLWW2>YK1`z^`c#@&_(_md**cM6Q7dea`SBknHl9zok(-QC>JySt092SP$f z79m-a+&$gBh;a87QkamWplwMu3$orcf6Ay7IoLhyKY7F5uMpuLDWqaTDjwpMFrT_v zJ93jMzBDze zS;IzkDnvDERBu|}sSTnUME$)P@3=n)p8I|G2SQSXqzOqEl9A;8$o(-9?oWl} z6H>{L=~6+{=7d`nBfoKfALPZlzje=YeE9c+!M*h#4SS-fW!v1_iE!@}QZ*q}4;igtzMNSra=-gXko2#edenW4 z2=@sg)e=(e5T}m0C9`VeY4?Ty%)~|arNT_q6;kBGOt`bkMy9*7{*#mKzFo+P5>ow# zoENjo`tQ1b`A^QT?*A2X8VadVA*XWN^}Sthcb(BEl%xg?8~!EV4-_eA>fgqNA}h8+ zQ%!`_^s%WP^~}-PKEG4({#Oi&s%VA8=0a-mnD|0wrt8}D{~v==MyU{pPEyJ$PblS- z@>M2o5 zeIY$1q^E`SjF6s9QW`3ah)|jcse_O@hRDx_249Ce)g#*~PX~GJlqZ$;%2Pt>ETk?% z>MEpeNy;?o7nOcQDE)=hOGqz-c)f$U*X|F$ zRK5SysAdfs)T>h=szKu>(_Wlfzh1ql27jwlWr*@h(Dc8RoH9ZgNrW;=*rmQgdNE|X zpSeG$s(-xl#(%~$S(!qFGF3?Zg)|_<85oSmy<}X~VlPdNimD%+)*41Nn$~w}RO2Rp zpVbthydO-^+sZr2yUKe)8Z4xjg)~G+Lz9#bln;qeJ{D3;2_X#&SsotBP;PFm$gh-H 
zLE_iSH%hGXt&m0tX{3-|71F3A?hWk09ya=2hjM za=&ndeI%rhLnPVUSx~_*c}n~j#}n?c6X9_R=~E$n7UFzfx$XKFZa-M{-nD*H8`Nu9 zr$SUz(|XhTOl{hv;omCLqk2jPqLVzDNB0;WpOC%~(w9P-DI_z=Q_54iFqE%^RP5`A zu>_a6Yj?{o@T)|{M!=eY3^x3gr}vD zz8BK$5GSs3+x5@(e>(7V+kR7{8Z@g@A*x=p`qTPMZPvJfx#I`D#=ngwPX|v&B0Qah zG*?LTLT2L4oUY zAf$yM<7LQ_aY6p5Q9(dCfE4Gr==aNK1sYR7lH&v^>f4y5|idJX3_ULP#q^oK~%JpQ}W^;|VR%|JtUHJRcL``BX@2gtRuqTW2o#xq9SGPi(MF|H}E+Gm8k%_d?nr zq>UlYCiDK!wIky_$^ThK3p@*n@GKV679nj7akiPgf2kU|(zEVA{jB$FAi}drNIQhI zGsM|tKKP}2adWGJTkRDBGv5mKcl zfpI(`FO|c`6aZUmWs|cp5KG} z@VitlbmsQlFN78yb-b1!R6K-MG2QoDmw#;8D;2_vPCwq_fl}t& z`zrKFLOK)g)rEA{6!)r>EEO0S?=3B) zbA_KNa=c}|Pk76D%bU;q@r2`okS+@8e4u|(Yu*g2;;m{%-77CAdaHS>o00e0dTM#= z1TEJV(&cz>gpjTTE!T(&TCOjot3{R@ddqnmQ%y+MLx!&f4ga<6-J_$ut%`zdEu{3q ztDsxnc0p@T3Mr$=+Ed_K<_Jo=!M=v-of6Ny+gc1y)oWl-r+*Z6HJ=8ceHnmcdU1ukp2|1EaVbGb_rP#vL@uxLM~g>brTA^ zEhKZgWgcd*ng{z0MS*Fwom zOWoq17D|3Alw7pbE&gSpv7;@P8ouC%FT?x2H!hgH*+O~{@0}xL zg4rvV5X@epkS(XV?VayUrUW?*b$9eG@-FsHh;3rERMMDQ%4hcQEACU+Y=@s$Mc+SpZAi&k;Hcf&x%=+OSDPGg6+ zA3J2y=$J7RJB=MOc;bk$V-md^y_;fhSuHL6TfN(hV%s5P+oRIh9Sm=ekc<5@yaV3F z-pK)q3%PhGy1Rdk?x>K%6kD(}3Y&hyn-VmCQpn-)-c%vm2P&m{#Egm=GBIXoFe>wl zd%FKj(A8NXJO1hFqIa=(TF8|%J+Ud0Me83XOMDyIG5iGveWpIVBsFLdblmE5OSILP^L_AuWal9*%I!mtX2)GstEasc(qz=YuQphc3GIEc<)-l=xYnP z{6C|Q^e*;(6AHdUDERw-Nsihum>fru=O${ip!udkt`x5}7jorTx6M+iOl!4WAUZ*9 zqqY@tRUubPP@h!W3%RqocG3qcO*B5dFAvYYTREzc7E#+bt7qd*OI#!+VZRW2y-=IQ$TYX2!&4t`T$kDN17Pn}{gFX47kV|b?R@^eu+U65=W*|CQ9i)Dyey+Z( zexZIDimsKATMIc#$Zdq&Hd!?-n)F1(#0m|Y z*Kb;(TW95V-w|Ub#*7~}ct}iP3GY%D2U^7A4uloua=E%b5WPTMp{`U{d0$r7sB6`A zLhjH}$eo1TB{-kU-Gtm-$UPUR8`O>JCUvvAMct}y6Y>i}?kD7dLLMUIVL~1$#}0B?if&u&4=*F-t^v%zsi*Afi9+1-0ZhyFihsM> zwX3HJl|2HO?sHvAsOJmcdj*X?Usk=MUJo=)P_L@jgxp)meG=3gYPyj73i-v@V=haL zvg&O$H;A|sM7$J2CT3PY8X_Srh!WBTou`tC8Oj(mfHj>2ZH)jSF)*KER_tWYC&Fx;OW#Eeo|1 zAKy1>ZOyyq+m+JV7b;#aQal^nX8rrMruJMRdU6rE>%;5i68%EUjn-Z37l>Y{_0W21 zy|fp!-dZ26ulAyl-xTsxA-^T$X+oYZWWSIDLKX{&(E4iwh`_DANEK~JaQ7;|E#!AZ 
z*u6lQkVQZd^1B;mcr6tzmWHvbREux;*ucQ}ph@v~5NH#$w}M_LYLm3rwKufM+7#_g zZK{yp6Y~2){y@kd3i%@;e=Ourg#773s!>JrYXMDYZ&O8kH_$)m<})FGF63{8%@t6D ztyFBJX1U_|LYocS8PN$g_nUC*(Ooo-5>e3$-oUR&ATMUE87U)OMBD zb_+RP$V-I0QOKKwd`ZaHg`6X7#Wvj4Ewbe~?U;5t5WPS@&X|*TtF2fv~yJ9h<1^PK$Bv&qTLF#nD?m2 zvjZ)PHUe6X_EXSRu9m0eYX#a}?Fa2gAukp3G9fP)@(LlZ6!Iz|uUS`Py3B( z+8=@b!Rlx%tNlg5vq(ssuL?N#W&Bc0gy~K+TcW^yoCMM|NjQz{|BeFxt8w&9* z-91q8JYMNTTfZJL6BBhs_b@irUE0!Vu&$R6L@&|}-KUq-OX;QcGJ0A43B8<UEX;zM7e_11wF@p>B} zAAHzD`jdec33_||DIp&c^0CK7sz0kg7igNGchEZu`LK|WB&gl>&O-J-BjlsqM-1&g zamw?2ar$eN(8rUbPtYe?x-e9qs87^i*Cz&_1}dH*uM8eP zc*w+<@u5oTG3?1PgGyKW1qb7|W(`C}=mt)2cRI~{b2algP;-6yg zBXOq@V`3g*m7s+&e-Wt<9{vOn^6B6tJCaxRDf*iO6<_-odiIQ&Jn|SlZO4rp6;q+};32O>bgj^7#F$rO##g8@YG}<0PmLcx zcKkpkveL6LgNHV6K6vQRPBDYW)EG8u?BI!YDun*4S*cEi@iD_<#>b2q5;L^Iu<>I@ zH-CP_qjGxhaXIM?^$&wD!5*#^S`GA1^cjKZMEz6!GyQWRUl#HeAzw|@zX*Zzkz;h|uTh@j|{8s{Cvr-;OOPYZ>WT zs4osQjn@|mIWt~gBIK;tSD&zy4O^kFjO|p>QrYj&*Xrww5F3Pir>Oik>s$WzHx~MK zeSaW&fxbiEsqfNv>wEOQ`aU7&3OP^6`9dxb@?9bSAmkqx=m&_<59x=AAVNPTTa!4S?1`ImV8qL6(_+*zj*zI zkbf)M>=ng$OV0{|GKKtmyq+!OKVIlDcGCDEe?jEx`9VaUknhFo1wy{xXY}9^W9kkW zH?G?wGYIH^m$U^>I$oGZK zB5c+qy&(8&ga;*s|7>A>x{V(@bkdNC6Z&?KnGgzd!lMuj!)@b#jf}>@lBxQ9b0hlkdT+E8wufifT}GQg zi$tTX(av~M*c4$?h0S=>I*0MJ(c$k)xY5ygPS`xc=8fH5(Ne8UH>1Zt06mRf!lntE z9vfE4QnAd7M*n{R1{ec{%_nRnV_R3URC{WuF**>v(17<#*eVEH#f3x|W2r(lW4tlJm}pE2rJ_>o;!2hco`CUoAUfUIhqe zu+kSY#|VmgsrLGGn>0H4wIj!q!IE+6(z!Y;qOLYR^Vvld+i+ z#uj6%vCY_S?1&vv)zT|GTG$#3TO(m>9ec2-1E%7-NwCt_c!W~tgPh|UZ_et$?d zP6uCYE&e)IgMS5`!%*YAaUp=PwGy_L!MSl5!;Q<2HsHle6#BSoT#L1YSqwSBxX#$n z<3mllkrCUvx+TJLGj?=!OOuzgjNCxefB&Llt)s9#Cv2UBt+TLonXiB03-{SAn$IcZYq2wGSo#;&eZD|+ zyw8aJsfH!8cv)ZYvcva8Z0(wsYEM`81@AWsH`Kmr!q)TQO698=Xp!Kn<*O}hy@ajL zqw;b1>iVMoJ~DjueGOt`Yg(%7O?@r?p^Emkj7_U)soJ}p@5#T-6TYW?&ldUZAZ+~~ z`Rx?++u7Ge*aisO<2xFM?|EO(e;DuOdm*-KElXv+pKriFR0Dm3Vn3*5sq7x=i=l*X zm~S{keItA$V>i{Z^wvlDM*GJ2#!|vJ&i9(7i*KZ*t8YSV)!LR9{g3O}_qy+mU_Db! 
z*hUH4@WN^iJPh-eZ(3krqHns-?+XaqE5bHH*hW6C=tpH-Sm1vzS>H#Yl702xOSZ$~ znmyzDOOy-^?u#s4d=q?TfM8jV6}B;j8N22CCNMDGH(c0aLPhPH<@;`+QayH=W+@dr zsE$Rkd>{K-9ZUVNINzMulscAbVF|vuu_x+SqQeq=i5pxImL)a)LtyqWm^~3@e*m+; zfZ1^{doIjQg4qjT_F|a50%os**=u3;Hkf@LX5WC>88AB&X5WU{f57ZNAr6QOgE$++ zb%40;5H}Fw21DE{5H}LyMnl{<@W;IYac@H0G>8j8+y@Z16ygp-Tq?v}gt*HPcMamw zA+7-8et@_KFvkjWWSCPN=GbA53+8xWPC1xU5$05ZIn`iJ4VY64=G1{X-C@ocnDaHv z*@Bxn7hx_iw*<_!Z!r2;c3Ay!kT4Gt5+NZO5*9(iQbykn|2DErX=> zkhBq!{($)s%(ub(`Y^u*%x?+v$HV-oFn=1%pNF6Mi(vi|n4bpoFTwmPkZgryJ0v?H zIU16mg5+l)c?=}K0m)M!*@WcTkUR&Hw?XoLNInS3nUGun$v?mX1s0To1!Z8t)3Bf` zEO;IkOo9c|VL`wT3ld?$QdqDY7Mz6z*I>a7SnvQA7Ker5u&_ETtOpD0!@?f0@FiF{ z02WSyh0|eS02a=Mh4W$I0$6wg7M_EJ7hsVU7TIBu6BgBkMfG7(Ls--g7Ws$5qG7N| zz@m>~(WkIzF)UgGi`K!SG+1;A7F~fw_hGRO78i%b)nIWXERKT3-C=P*Slk~LzX6K_ zu=s6Qycia*fyL`!@mW}W4Hn;kC01Bshb2z%FKGfx+Q5=_u%r(x84OE?z>=A;#+1TewOCIG8vY+V3`8TqF`BbSQZV-2Enosu_9-m;8kWVvvQ@Bb zGc4N*%g({F>#!^xmKTHN3M}`+^186R2`q00%U^`$LtuFfEdLCae*??Eh2>lPuzW8p z-w(@g!SZ}qeiv4hhZQwoMJ-s-1y=Ni6@6jF6j<>#taukz#KVfkuwp5!xC|?9!HO(c z>4KF$SXl~Ic7&BZU}Y~@ITcpE3oGA;m5X5IYFO!C3o9?f%3H893s!kyRT)_I1gvTb ztJ=boDc)m2#a09F@=)#0$ZA*^l*t6RhB;jnrf ztR4@mV`23?Se*c?4}yPn3an0p)jz}PKVc29rZud27S?oxH6Ov6nXu+7ShF409Dp^4 zV9gy^^AoK31=f~?wH0A)Wmww^);hP88G?L1h!3)UWnwMX%@ z_9s|-AJ#sAb(LUUZCDop>$<_ZzOb$zta}UAy$9<)fOSh?-C9_;9@hO1>%(BZ4C}kW z`o6HfAFQ7Z>*vGz1+e}{Sbq=J{|Osf!G>pGLkHLp3mfM7VM79J$b${P!iL{q<1?_a z8*Jw*9c} z1o*e5z;*|0*I~O4w#UHsF|d6cY+n!Ccfj^tu%j64P+*4_b_{|YBVfm?uww!2SP45; z!;U+!<0shh3+${3JL|*FhOl!i?3@fc--MmpVCR0=c@TDipIs$jmmPL>fnB{}S6|pQ z3w9;It|Zu%0lRWxS3c~n3cKsV?s~9$3haIxcE1a|kHYTLu=_0RX$X5-!k*T!=Y7~S z1NM9gd(Ok28?Yw>_SS*DjbLvRKkOY1dtZmWlVR^6*qaJ_Pr<%2u&)a2s|NeV!M-W5 zZz}BD3;T}2z7w!th5cn=e>vDc8uq^q`zOQxO|XAA?B5FqiopQ|4tU|f%W&XTI4~Lx ztb+so?Qmcx9F*ap3l1u9a2OmM3kP3=gB##rP`?`vS>cc!4msgaJ2>ga1f5I8q&s)Py5b;K z-TL$9-_T0vxXd#|Oai;rKZ|0*)_&QoXCWe4d7%noNNUr{c!R_IQcQ0+z%&Dz{wOi`2(E%15VzD6d)xG zQXG)t@VQ-?qLy6t3aL9FbvLB$gVckN zdIVCBLuv}7o`Te~ka_`9FGK1zNKJ>-TacO!sX36E52-&u>d%n+KS=!pQvbwHnjO-d 
zkfuPI7t+c=+7po02hw67Z8)TjgtSqRHVM++fV6Fpb{Nu*LfQ#POM$ctkah`9MZ&3Q zIMoVHwS`kp!l_PhstcTYA5NKY>T5XlEu8w!52q60)O05C6M>zd6oc@hf}!?{Xut_qy14(DpZx%zOfA)K26{&Vlcxewvo zCvfgFIQI>l`xee!g>yM@E)UM#g>yf`xqEQ#PdMKS&OZm|JHz>IaK1a7?+54m!}+h^ zd_0^_g!9R8ej%J+1?Shm`QPAz4K5Ui3wF5R#Loo-E|i1|UE#t@aA5#k7z`JNz=ct8 zVGLZ@02lVch5c~h5L`F{7f!*2GjOpKT&xBcYrw_Ya4`ZdHinB$;o@kx_$FL@3oiQM zqJWDZ!^KbGVhUWm5)@v8i|KIjCS1&eiv@70I$UZ9mm0&RW^kzmTxt)Oo`y>w!=+fb zGz%`xhD&qc(gL`&2rgZPOF3{U4=&w>OFzP;dvNJbxZDsfw}s13!sVyo^0RQ+|2$mo z0hd37%U{FgSh)NhT%HY==fmX%aQO~g{vTZa9WLL8%Maj6akvr=SK7jrE^wtATSVY&6|PQ$tM9|r58>)bxOy3`UWKbS;A#e3&4sJ^aIGd>YYf+#!nGD~ttDK08m>JH z*S>*kiEwQ`Tw4g&7Q?kQ__?+Yt`~#r8eBKvdMUVG2Ci3u>($`;Ft|Peu1|vNli~WC zaQz*){vKRE4cBkL^$fV43D>jX`VVmZC%DlVZafJ$o`M_C!i|n_qX*pR1vkEh8}s4D z0zceX3^$g-jdgHi1Kh}g8^6JgKOkL#^kR@64(U!vS0KF60Km4$_k$eGR0qh4k%^egx8wLHZd;KM(1bApI(&`)@${9Z1iE^t+J$6QutN8D7X} z1{u#l##qRh3>j}j#z&Cx4P<-^8S#*j1Q`n;V=-hbgN*f%u?aG^LdFir*bNy+Ambcl zq(eplWc&abe?Z2c;J;~sn-bhC1~(PBslrVIZkB?ZW#MKMxH$lBj)I$C!Ob|hxfpJ) zg`2zJ<{r3t6mFh?o2hW~G~7G~H?PCZ47iyIH*dquT)0&nZbibamT)TuZjHgutvBG7 zA8vgLw?2nkv2g19sg2r`dC<_X9=2bnh^GaoYVLFS*3RSdGiA(AS(*8{LLZjNyzF3S=}M4A7l-Htig~q6tadx);P$T09mg?))dHk3$osWtj{6q zd&r7|tYpYq1X)WVYXxMjhO8}+wH>l{LDpW#IsjQGA?rM3-GZzv@MqnHte+t3SIGJu zvhG8+4YEr>wga-=knM%+GLT&bvZElo0c1x*c5BFP2iZ?S_Op=vJY@HT?B0<5B4qc6 z>|u~S7P8-h?CFsG0c3v+*`Go77x>9GA$vAt&xPy+$es_`3n6z?}@paX^j^IsWpHQxkF;Lrzo3X$v{+ zA?F#$=?FQUA?F3i=?gh8LC!$Pc^PtEg`6pn^Dg9k2{|U@%!Zt~kdpv8^C4#;+6hO|8kn;=V{02GqAXkRm;*e{HTo>eeAh$H+ zR)^du$ZZ9=Z6LP;`N6uPo%1hrCLVR~7PVKwcE&HH5q-kk=gYT0&kQ$n!(qXOOoJ^0q_X5y(3Y zc{d<01M+equK@CXguGuM?>ES|LcR?7#UbDBhkO_0*M$6TkUs$O--rAekpC^@$3y;d z$X^Nhn;?HH#zVnWD3}ff0t()R zf)AkJ3n-Wc1qo2F0t!|^!DcAf1_e8zU=I}Rhk_GOkO~E-!C!C=3NAuH1{CB%!7p$( z4DQNs*9CVya94x7KKQ?u&HTF!!pz#wBgrOSBrmcZS+-mQLD~JrI@;$|A_HbqL!~h9yu6We@vWG6cd_3WS}olZK{1p=`1&Io{{+&G-KB z-gEAIpL73$w-kp`ACv~5l!H1Ng%Ed#Vd<2wFg7R;m{O?dc2g<*N@+v4_1m!EByc){C zhw^n$z5&X2KzS{cpN8_QP<|cC??ZVrl(#_nBPf3Y$v+zOR7P`Mu}4?$%uRMtUdJyf2B$_A*s1eI5z 
zvJonqpz<+PzA8dx3Mw;Dodwkfs79dL0@XICCZL*vY9CYwpqhi~H=+7%sD2l!`#^O+ zsQwJ9zkup5q55m69uC#xpn4)yPloELP%T3BJg8m<)oY=8``6x`P<;rhYoWRhs_UWp zBvdy*^(Cmj3e}BJeG94|K=o6ojzV<|MguTf1EaHGv;jsVFghDX<1m_l(G-mK!DsNB24TH6UV^B$uMyWOq>oAXTrpP z!o+1TaRp4QhKXxnvKJ=vFu4;<&WFiw!{m2hau=A~6()ZGlRtvVePD74O#T`s4~NO4 zVDcE4Tmh4J!Q`J{@@beXJ_D1_!Q=}t`7%tt29saHc0#oBKH3ieabP$TuwJ=== z(~U6Q1kCTFr9(v0hm4rrVoYb!(sX;m|hOkH^B5gFnu3PKM2!*g6Yp;dK{)F zVR{B;gqgW8^RF?-h|$Y-j3dd-i!V>x(;2BK8e1CzJtDtZbCmm|BjZ> z;s{#BKoA2p7?_2D1`I?n(1L+E7?_KJc^FuLfrS{@2Lt$=Vli+t25!T^9T>P91NULz5ez(vfyXiM z6b9bLz`GcD4+DS2z~3s@G%TNfx%4}{3ixK$KY2OEMu^W!7&WAVz|KVs-646VV?ofx_Y zLr-JqeXI#!O&!*BU`;WGH9c69#+rVt$zsiRSo2M+*%52zVa*X(a}L&Ai#4}k&23n7 zAJ#mGH4kIWBUtko)@;C<7qMm|*1Ux^?_$j+toaaY{(&|B#F{U#b~e`TinYgLvGzAu zdn(qh#M+Cn_7beU0&7=e?eDSnI;_0~Yu8}yomhJh)~?0cby)ih*1nFlZ(!~FSi2c( zw_xo@So=BFeucGVtgT}03~p14+ce=etys4L>xvYuI}__J#JWqc?sr&sIo4f;b=P3s zAF%EYth*cQ?!~$Xuo3Oo z%dq|ytiKKG@5lOeSpQfN>z}~-r?CDNtbYyb|AzG+V*Nj`{-0R?1=g3aegx}Bv7r_l z>ad{^8=A186&u>IA%+cI*pS7BJT}b1hV8In88#e;4JTm3N!aikY*>#CFJMFQC2V*V z8(zo8W^7DgV-Gf_u`z>3{R3=E%x;fpYQ z1%_|L@XZ*$9m98F_+AV@gyFRqUWef)F}wl88!`Mh4F4U&pJ4bi41bB?5e$!FcpM`^ zj5K1TlcJF>jHEEqhmiq{3}IwjjC=zlJ7HvJjO>AtB^WsXBL`#T5R5Fv$e|cH0wYIb z@#$c-4e6C)2{WIaZnC}QMIjJ$)9_c5{=BU>=? 
z5k@}2$Y&V&5+lPHsbEt8n?l&M4K~$bQ!6&LVbk}p=>Tjx5StFhrk`Weq1bddHeH8J zcVg4s*mNH@J%CM*V$*tTE(Wl<37cE6xec2;u(=1Dd$D;5HZR5IUt#l!*nBcJpZYa~ z%`36_Y-~Oco6pDQ3$giHY`z|wZ^Y(Xuz3wO-+|3{WAnY(`~Wt;jLm<=mLRtDV6o)~ z*m4ZEtiqNTu;n9c`4U@7*fN4Gqu3h4)>>?xjjeHPO<-#ZTl=te09$j|TEN!ruyto_ z-3410W9x3%x+k{og{?or)*oZ*e%N|6ww{Tt7f`fy9k#xVtuq*H#b^qnIgHN5=sb)r z!01AZF2d+yj2?i|Utn}8Mi0g45g0uhqo-i>G>opq=-C)O52F`g^kR%&iqY#ZdOt=V z!04kGeIBDP6fycLMmJ*gEsVa8(ajj$g3-?~I)>2+Y-_}}CTwfPwsvfbVOtlrC9$m+ z+cMa;Beu=MwguR>5Ze}E+hT0n4cqp_w!N_JC~UhJ+it+Nr?9QK1>0v~`w+J8f$dAM z{QzwLHMSp)?MGqzG1z__wx5XYCu94m*nU2?Ux4iwWBaApei^o3j_p@r`!(49B({Hu z?OU;<8$0G>$HCZf7Is{K9T#E8#aQgP5<6C7$JN+zD|W2Gj~;TSs>V`pRRVvOB~vGo{x0%IF7_7=w8#n>i{ zeSopQW9(y$eTuQqG4>U92C=giJL|Bs5j&f(vlTnru`^c0&MxdsV&{D9+#NfAik-(` z=hwH(v2zV}K7*a_VP_dTC$Vz|<6yiF;~f}JV7w3G1&nWp@$E4_AL9!!z7XS!Fuo_o zzmM^~F}^Rx_s4kgml$7$@gp#PG{%p`_=y-l8RMsZ9l-cXjGu+^OE7*Z#;?HmYK;FL zp_h~2BO`$Fu#9J{Z=?$s3Sz5%;$#_n6N`vL5J2)iG}?#HqFDeQg*yI;ia zSFrmv?0y5gOPH92i7X~|#KcaR*clW5iiz)HVh>Dw9~1jw;s8t>go&SF;un}W4il>} zaT_KcFJj_FOuUSV*D&!0Cf>%xdzknF6BC%2!o)NtW-!@-$v#Z>V={}$Axv(I$!}nC zAtram1M7B_?md&tT6h*s}?XJ)5y-3-)}3J)dCDXV_E56qpKNss>ZDFx7ym2&QIZDu$_k zObucxkEuDB+8$FoU}|Sf?SiRYF||9UzK5wFVCo=D9gL}Em^utoM`CI@MN=yPOg)CFKV#}OOl`!}TbOzmQ=2gL z0j9pd)K{1)VQLstGuSKk&cfaX?2Qz$w*`A+*xQA@N$l;#-rcbG80@_ad#}gd8?g6g z?7av3DKzK8C&PvG;N8eG7X(#NLmw_fzcs9DBdQ-ZJ)1V(&DjYcM?v(+!x8 zV7l0a=>(?JnC{1P7SnSuy*;LP!1Q-9{X$wrtiS?-I#t7(;G1THm2XjV)`>oe~IZ~Ojj_y71I-#p2j}0 zFNA&V*cZdTF6>KSUk~=Bv9BNdve-9-eG9Q~f9(4e_8o(L=V0Hh*tZV*{(^lQvF}an zdk6d8$G*+jw}qma2F%1T6UR&fGbzmUVP*g`LztO|nQvic5oQ)+W;e|2iJ84IvoB_r zVCE;7IU6(AVCDhLJc^n1n0XR2f5yyfnAwP#zhmZO%zTQO&oT2=5i@1XR53G#{j;#Y z0sAA^--7*Z*gqTlTzA19Na- zdmPvS2j=3yd>mMS14rS&i8xR^>Fb39D{{7hlBk%n8m>%9Q+p?oQs1;;oyllxC#d^!NHqx@NO&) zzKesKG24LI7-r*`?ZfN|U5X2(!yDdpu^Z!R&3A zy&bc6VfG%(uEp#ln0*YhPhj?K%zlK~e_*yu(QFm7W0;-9>as%$7-;DVv=3|)e!h90*1>g*f-SO5Pw=kosn$YVAg diff --git a/main.cpp b/main.cpp index 21fa1edf39..cf2ecbac8a 100644 --- a/main.cpp +++ b/main.cpp @@ -49,6 +49,8 @@ 
#include "texture.h" +#include "cloud.h" + //TGAImg Img; using namespace std; @@ -74,6 +76,8 @@ int bytescount = 0; int target_x, target_y; int target_display = 0; +int head_mirror = 0; // Whether to mirror the head when viewing it + unsigned char last_key = 0; double ping = 0; @@ -98,7 +102,10 @@ ParticleSystem balls(0, 0.0 // Gravity ); - +Cloud cloud(0, // Particles + box, // Bounding Box + false // Wrap + ); // FIELD INFORMATION // If the simulation 'world' is a box with 10M boundaries, the offset to a field cell is given by: @@ -113,11 +120,9 @@ ParticleSystem balls(0, #define RENDER_FRAME_MSECS 10 #define SLEEP 0 -#define NUM_TRIS 100000 +#define NUM_TRIS 250000 struct { float vertices[NUM_TRIS * 3]; -// float normals [NUM_TRIS * 3]; -// float colors [NUM_TRIS * 3]; float vel [NUM_TRIS * 3]; glm::vec3 vel1[NUM_TRIS]; glm::vec3 vel2[NUM_TRIS]; @@ -154,7 +159,7 @@ int display_head = 0; int display_hand = 0; int display_field = 0; -int display_head_mouse = 1; // Display sample mouse pointer controlled by head movement +int display_head_mouse = 1; // Display sample mouse pointer controlled by head movement int head_mouse_x, head_mouse_y; int head_lean_x, head_lean_y; @@ -446,10 +451,17 @@ void update_pos(float frametime) float measured_fwd_accel = avg_adc_channels[2] - adc_channels[2]; // Update avatar head position based on measured gyro rates - myHead.addYaw(measured_yaw_rate * 0.25 * frametime); - myHead.addPitch(measured_pitch_rate * -0.25 * frametime); - myHead.addLean(measured_lateral_accel * frametime * 0.05, measured_fwd_accel*frametime * 0.05); - + const float HEAD_ROTATION_SCALE = 0.10; + const float HEAD_LEAN_SCALE = 0.02; + if (head_mirror) { + myHead.addYaw(measured_yaw_rate * HEAD_ROTATION_SCALE * frametime); + myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + myHead.addLean(measured_lateral_accel * frametime * HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + } else { + myHead.addYaw(measured_yaw_rate * 
-HEAD_ROTATION_SCALE * frametime); + myHead.addPitch(measured_pitch_rate * -HEAD_ROTATION_SCALE * frametime); + myHead.addLean(measured_lateral_accel * frametime * -HEAD_LEAN_SCALE, measured_fwd_accel*frametime * HEAD_LEAN_SCALE); + } // Decay avatar head back toward zero //pitch *= (1.f - 5.0*frametime); //yaw *= (1.f - 7.0*frametime); @@ -608,24 +620,25 @@ void display(void) glPointSize( maxSize ); glPointParameterfARB( GL_POINT_SIZE_MAX_ARB, maxSize ); glPointParameterfARB( GL_POINT_SIZE_MIN_ARB, 0.001f ); + glTexEnvf( GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE ); - glEnable( GL_POINT_SPRITE_ARB ); - if (!display_head) { - glBegin( GL_POINTS ); - { - for (i = 0; i < NUM_TRIS; i++) + if (!display_head) { + glBegin( GL_POINTS ); { - glVertex3f(tris.vertices[i*3], - tris.vertices[i*3+1], - tris.vertices[i*3+2]); + for (i = 0; i < NUM_TRIS; i++) + { + glVertex3f(tris.vertices[i*3], + tris.vertices[i*3+1], + tris.vertices[i*3+2]); + } } - } - glEnd(); + glEnd(); } glDisable( GL_POINT_SPRITE_ARB ); glDisable( GL_TEXTURE_2D ); + if (!display_head) cloud.render(); // Show field vectors if (display_field) field_render(); @@ -800,6 +813,7 @@ void idle(void) myHead.simulate(1.f/FPS); myHand.simulate(1.f/FPS); balls.simulate(1.f/FPS); + cloud.simulate(1.f/FPS); if (!step_on) glutPostRedisplay(); last_frame = check; From 125eed350a9820db8be11961ccfbf568ac6598f8 Mon Sep 17 00:00:00 2001 From: Philip Rosedale Date: Sun, 18 Nov 2012 10:45:41 -0800 Subject: [PATCH 008/136] Further work on Cloud class --- cloud.cpp | 23 +++----------- field.cpp | 15 +++++++++ field.h | 1 + .../UserInterfaceState.xcuserstate | Bin 101370 -> 101260 bytes .../xcdebugger/Breakpoints.xcbkptlist | 6 ++-- main.cpp | 30 +++++++++++------- 6 files changed, 42 insertions(+), 33 deletions(-) diff --git a/cloud.cpp b/cloud.cpp index a0bdcceec3..18a05ced44 100644 --- a/cloud.cpp +++ b/cloud.cpp @@ -63,34 +63,21 @@ void Cloud::render() { void Cloud::simulate (float deltaTime) { int i; - float verts[3], 
fadd[3], fval[3]; for (i = 0; i < count; ++i) { // Update position //particles[i].position += particles[i].velocity*deltaTime; particles[i].position += particles[i].velocity; - - // Drag: decay velocity + // Decay Velocity (Drag) const float CONSTANT_DAMPING = 1.0; particles[i].velocity *= (1.f - CONSTANT_DAMPING*deltaTime); - // Read from field - verts[0] = particles[i].position.x; - verts[1] = particles[i].position.y; - verts[2] = particles[i].position.z; - field_value(fval, &verts[0]); - particles[i].velocity.x += fval[0]; - particles[i].velocity.y += fval[1]; - particles[i].velocity.z += fval[2]; - - // Add back to field + // Interact with Field const float FIELD_COUPLE = 0.0000001; - fadd[0] = particles[i].velocity.x*FIELD_COUPLE; - fadd[1] = particles[i].velocity.y*FIELD_COUPLE; - fadd[2] = particles[i].velocity.z*FIELD_COUPLE; - field_add(fadd, &verts[0]); - + field_interact(&particles[i].position, &particles[i].velocity, FIELD_COUPLE); + + // Bounce or Wrap if (wrapBounds) { // wrap around bounds if (particles[i].position.x > bounds.x) diff --git a/field.cpp b/field.cpp index b4a9351a46..d08c93822c 100644 --- a/field.cpp +++ b/field.cpp @@ -60,6 +60,21 @@ void field_add(float* add, float *pos) } } +void field_interact(glm::vec3 * pos, glm::vec3 * vel, float coupling) { + + int index = (int)(pos->x/WORLD_SIZE*10.0) + + (int)(pos->y/WORLD_SIZE*10.0)*10 + + (int)(pos->z/WORLD_SIZE*10.0)*100; + if ((index >= 0) && (index < FIELD_ELEMENTS)) { + // Add velocity to particle from field + *vel += field[index].val; + // Add back to field from particle velocity + glm::vec3 temp = *vel; + temp *= coupling; + field[index].val += temp; + } +} + void field_avg_neighbors(int index, glm::vec3 * result) { // Given index to field element i, return neighbor field values glm::vec3 neighbors(0,0,0); diff --git a/field.h b/field.h index 2d0dbac757..e1d26206f2 100644 --- a/field.h +++ b/field.h @@ -27,6 +27,7 @@ void field_init(); int field_value(float *ret, float *pos); void 
field_render(); void field_add(float* add, float *loc); +void field_interact(glm::vec3 * pos, glm::vec3 * vel, float coupling); void field_simulate(float dt); #endif diff --git a/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate b/interface.xcodeproj/project.xcworkspace/xcuserdata/philip.xcuserdatad/UserInterfaceState.xcuserstate index 9cf966bff86303aa87ea0c9963c3cc86906f6585..d1f81ce64c67c4f45d200e9d7b00025121c7c034 100644 GIT binary patch delta 25123 zcmZtN2YA#~xgw9d|G|5A_ul*bXP%kooHO6|J>N6$_dR8vGd~uG?kEne3M#CR zJal)h%aW$->$B^USUh}lct!Z`@IB$x)vpTQ8!ine!e!wN;f>+#;T_>U;l1I9!$-rX z!Y_qi4xbL6373b@hF=N)A^c|et?+x{_ro8D{}%px_{;ED;qSt~1Q!Jt2bToZgBn5Y zpnlLixGYEu(u3AP`=CS6E$AH#2nGg&f?>h+!SG;AkQ3y_g4w~GU~Vukm>(<%76yxg z#leliO~I|fZNa_4eZiWbICwDF73>c71bc&h!T#Vta4MQd{3G}}_$K&g@I&xR@N4AKNX1B{NaaYCNYzLrQZrIN(jd|_k``$b zX&Y%5Y1u8(J<=o6J91^@>d5tx;gK66Ig#8*USx7)R%AgW7P&F9EOJZa*2tZadm?Kh zYY#N7c;0K(MrK|6)`7#L7q#i!rb9+bn>MXF9O&>tVN$J;S?#)a>o9sz>(=dCXQZ@l z-?r0|k&D{2>d^kcqaWUo6wEty+tj%u7PZPq>Da2xl97wrcW#rB((yp*M^`1+8JYK1 z&Ym~27qw~Ms_i-3wyoMN8MUZQ`%dj!9k}(gqZL;7{Vb^aeU%?%R{1G@{*CeTZ;GFv z6hHsw#=RaMnS0*QPyTpd&X-+6$e9AtO0UDcFHNd(e#?q))RbdbJzjpXo?WB51eM`@}$R*)9;hW;gul&2R{9NS~7rEFag7B^3JBzw) z2;UaIJ$y$z`Hgt;oAKngHiTDdCNrM=c5!aqq`a1E!Vea8+ZZkm-yeP;yf&WvPCWVD zc=Dg)$?t6pm&gpS4?h%7e*gE*Ka3~;wfM`rNd;9mhqo4WN`$w>lmC(kZ;K~?P#mq7 zRJX#e@b2P44U-zjE)DMwAN&vEP(1mg|Ij)Tex#^d>Hpw)Ec{eaw=Lnv!jFfKho19eY@hF?EN;k9`3KN8_L;>lnCPBQkV@ViCb65)5^$=@Wxe~u^r zbL8Zyv&YYzkT-7pe>i>+{_8o=hwuTMcvB6Uj&uUfkUR0opbPC=EEmX$(#5e@TEZz)ciN!pjJF|-hcem4eI@` zs0ArOlcH`Lf`&n(pm98Oems;M4}~_!44MV0=b{n1ARfA?cyCJ5()yV}%YTa@XcZ4# z_#d}zgLda6({xCupi5D=#Kj}x=N(T3UE`rkepk2?L64%Y8-kv}*2ObR9k1@TZ2 z4@Kgkn(iXI&IL8$1{OY}8fxuNR4jy2L}B&&g?miNRZ|zj~u}@!u~^y6~d^>f=wrJH<&Al7i&2;9b*x zFaGx@?*;D{_e@R7O!`all+>h-$$t$#E$Xy7{%z1V_$>HbX7KlTsCzurAd)n8VwTU@PqQjI#V%P*=O4|Oi`Uz-1G z@YA`t{TvVVE$-DksbMFPP*Jx{k)+6ZkqVLXBgyg574cC2c<9P_=&E>Vz^3rZ$c2%M 
zl4?XQi6_4u4-Jfmt}YrA4_#Bdwt3QZmsF30i@GHuHR7Q`#oskg%DAL)IP{N)M*fS# zfO8xMMh3-0qvE0L|8Te_a^3&wVrXPoJd_m=jUIVb-+uk_C(oWXv+?B%OPzcC$8Ka~ zbWx|^f6f_^?8umSXiPkmQ+#JyQoYJ!Bja2Y$&ZYeADI}LRQyO<(x8;Qb3!VNhbF%o zDTqWPg^?*PicF16OBx)RlypsGMsd^hq-$bh&KeFO?;ZOV@!XNhk zQ~v)VEI(=RxmRRHuR7ow~RU@fZac)LZ zm81uXXJjO`sqkQ=q_{jIsab`x$hzXw8A;tLltvQ8l`@l3ht#8DV=A_yVn-_Wq~ZW7 z4x-`^Dh{RM^;8^1#nDvEq2gRBuA}01D(Nh&@~#b>GbeDU1Oq{gu?srU~nenZ7? zsrUmGf2QJZR637J$yB5YiLn@avLi5 zq;fAR51{fODi5aewN$>2%2{-%oK59iDvzV`1S*$O`FSdTNR`S|sY{jmRB1_-)>LUr zmG;GhTO>td*HYy=stlvbaH@=;N)}bdP$iEl`Ba%fl_gYJMwJy*xs@unQ{`T&tfxvT zRlcOk4^%y$suii)fU3=?+LEfBsM?FF{i!;Ls@G99hpKtRs*a=Tc&bjN>U64}r0N-} zo~7y^sQM~Z-=*q%RJ)979jMlcYF((-jcWa<)}LzkQLT(>8>zOLYFnwck7@^~_AAvd zp?XECSEhPZs@JA^T`|=Qs6LPC3#h(`>Px7;oa(nw{SB)BmFgc;{S&HxM)iME{X1%O zp+|3vs_ zf@Fe91a$~f2r>y`EeYBYbRg(N(4C+sK`(*<1VahN5=%SwUna zkpn~?CGr}PKNESM$cIEeBJvH9Z>d>fSMOk^Ac)Sq-JGm)}>~BYNk-L5jC4o zvzeHh&8azPoGl)EZ5#G1Qt*tvI!AqSiyy+DfhM)GCir>rHCCO|2iOeIB*Xr*;EsH>Y+Q zwI@+~2DN8V`yjO+qxNxX|4N+;sB;l@nouW`IxVTwhC1!2)15j!sWXx~S=7m<&IIa2 zsWY28_fjYJAax$4PC0enq0XPF^B#5Hr_SH0^A&Y|q|VRO`IWk;dmeQ!p>9R$R;F%M z>Q<+2AL>q}?mFteNZk*q`w!}VL*4JF`vY}UE)BH|q7EULWf9qh5dNT~ED9)SE%QrPM2;-ZJW~px&+2TS>iD)Vqgz4^nR<^)^#) z8})Wj?-=zSr`{9Pds0ljr>GyHehT$7so#?N?Wo^@`kkoXo%%hg-;4TJQGW>aqtu^I z{e{$DME%9ozm@v8Q-2Nh@2CD+>X%S|J@reezk~X_sK1B$`>20_`iElF|Bwb%XwZ`e zQ)#fC21jY|EDfHg!6_QNOoMV7yh4MIXz(Qs{y~H9Xz&9Kej?>UQZ6Q?GAUI_sZL5w zQfiY@my|S8x|4DRDMLxQo|M=qQbv<9hLmxnj3;FxDbq>0gOvM7DJEqtDJ7)rB4rOL z`$#!J${|vIp<#%I)oB=}VO<*5r(p^W8`1DG8m7}QlZGv6*nx&wG@M7Q;Sw4yr{OI$ zyq$)3(r_&eOK7-_hC67ui-vn>xQ~VhXn2T*Ptx!y8a_kA=V4NuYVWg31%qbfA& zNTX~TEum2vjdsvz7mXgL(GxT}7ym|2(dZc(Jx8M#XmpB3FVpBX8ofcIf70kz8l!P4 zja$*U4UMm%@dz4^qVXIWFQsu2jknYI0F4jP_@6ZXl_qG?kS1v~$)HJpnp{hh>tZw+ zN|RigjHSsqn&i`D22EzsWGPLGXmTS>Zl=j{n%qK@)ik+}CdD*)fF=*pWIat@rpXU9 zZBEl0Xu5)?8)&+hru%7nh^B{WdX%QeXc{|1(>G}P7ES+5)Awom0ZqT6>DM&bv z&DIcWc0bM5(yWAL>uL5m&AuhI0;$QQUP@{uQmc?!jnrDC)*-bXsSQX?C3O&~!$_S@ 
z>H<<1k-C)BB2w=p^)6CNNL^2ADXC?oZX|WHEU8;bJw)nZQXe7pQBofx^*E^~Nc}6# z&!hQeG{1)Cb7+1a&DYbsl;#PVZ>RZA(%O>Np0tjnbuQlBG3muvDQO!>+f3Rv($2y5 zkhY(+L!=!c?NQPmC+!4jPm%U4X)loW5@}~hdxf-DNqd8|w@G`KwD(E-khG6U`;@dV zNc)PkZ%F%&v>!?Pg;+Y$E07){{UXvYCA~7~)kqJMUX%1Xq}M0CA?Zy>PbEE#^i0xQ zk=~Z{4y1P`y&LI0N$*X1Khm!xeIV(BNgqP`Fw$=zeH7{0V$yR-A4mEG(kGK%NcuF= zXOcdL^!cPOB7G_8anf%leFf>ak$xxXt4Lo>`Wn(7Aiae2he$6YeG}8WL!o@1{p2M zXhTMOGCGmbm5d%_^dh4#8CR2W4H+}YSV+cVGVUc~9T^Xi@dz1DlJOK7pOEo28UG}+ zI+=CJtWRbaVwru&>_=uEnFVAPl6fzg>&SeF%qPfvp3E1?{2Q77AoCkqG@wOuTBOlp z7%j4Ckwc3zTI`_3E?T@ni}z{q0WE9MvJowt(6T=*uchU6Vp=YwlL)VmDZbSy@%HOX#Fy+U!(OKwEmGc6=;)8o4T}V zN}E*L^r6il+6<=6T-wBF6Q|8K+U%#zLE4<5%^S3Ni#9*e_I%ogXdBC@ z?UvDQ1?{%cZa?h~((Vtm`xCKt@6!Gv+E=4}4ce#CzAf$B(>|Z}Q)oYp_IJ~ME$vHa z|2XZRrTz1?f1mcB(*AQg1axRXhlX_MOo!fd=u3xlr_g*lOrS#%9d4z=?Q}RGro%Bh zJWhw#=39u9q0>clx`a*{bZSqhj&#bQ(aCC zm(e+$&I9Q@jLyU9Jd4hY>AaNAd+2l!an6BOE+K;aN={lXR3+TFtuAAw)hpzkR zT29wD>H0R^LUgN4x2kmOOt;>2>r1!Ebel=H*~Gf7rP~I&ZKB(=bUQ=0vvjXY_u6!? zOZP0g=hJ-x-B;250lGg(_s8h|4Belj`zLh&n(qIkM-_V1qDLKiw5LZ8dR$JAiS(FG zkD2sXO^*^WJ=W9X8G4+iM>##dp~o-u_>G>8=$TH>OnMHc=MD56NzWzpTu#qh=y{5s zf28MYT;7Pw)44p8%cpYrJT70r`XuP5mBJiT6| zcP)B1qIVN|PowvIdM~8+UV1-5??>tVBYi5+Cz(F|>2oc8uA|SL^jSln`|0yGeLkem zNAzt;-%j-HLf`rHjng-F6Maw8_Y{3!rr#y>t3khje#7XOO}`xaCFr-Eemm*+0sTIw z-`}|+l`C3tMH{Y&a>Z<}n9CIpbH#D4IKdS^(f@qwan&Is>B&oXxVsVUFjxP| z;0t05zKFp+8GI#!2Qc_S25)5WW(I%G;BOiHJ=gT+nt@z%HP@`+nuoY1!8IRp%@A3$lOeGc3|YmHdl>Q>L;lQ=_qnbU z*Y)DMK3sPz*WJT)_j27Ixb9C}_bx+gFti>+8!&VtL#H!zCPPm$^b|v1W>|<}l^Ir* zVIvtfmSOn}D`nU=V#9VY>`R9Iz_6dVejwKmR%+MkO<<5~HdxY6zo7F={lU z?q}2{Mr~o#Rz_`O)WeKA!l;vsdOF6aXBqW8qh4gx%Zz%1QExNqT}Hjns1F(SF<>7Lv7?tT0ACUDCS>KcO6Is778lx*P`Wi-$VDxxK zPh|8|M$cgMEJn{|^b$rFG5RJ(FDEv7C8G~8`Y}eo!RYrH{Q;vtV)Q4B{*KW}Jg08S_13ek7-JjGW%&^d%>UoJr&qkP{_mJ~<1?Swzlaa&9H( zc5>E`b3Zw2$tfXcJvj+-c9OG)oc-h+BIgJ>kCO8#xfhUIgWQ1JTIALtw?4VChUBJ^ zn?Y`Ca@&#Hk=(xIUP10aa<3-$8ghq`JDl8+CUC-)$^hsixk?u+EUOzs(Cxv!A>DtS%GYe8OD^174Pm%J;;yOO+toUiF}5ya8!)yJW1BLzIb+iq+k&yJ8QYGr-5J}Hu{SU_m$73R 
zyO^=d7`uY88yLHjvAY@jN5;Ox*gwY@`wQbjjJuF=jTx80xE72nWZWFa&12jV#+_i? zN%GGl{}S>mlHZ*C*5tP(za9C#$nQgbU-J8re;xV5$j>2vEcxTfpG1C?{Hf%}7LdRA zT!H)>$zMkPE#w~||1XTM#`s>0pTYP$8NZhCC5(TF@nwwP#Q3d@KgRf{8UHNfPci;9 zaXOx(c4%}m_J#GOps!^C4u ze4L3Vn0S(jPc!j3CcePLPnZ;9QYMpzGHC&m*2kE%g-P3(w4F%@m~@DOE);a9;BpH3 zP;dnWS5YvCf@>(aj)Lnc7(qc61!E}4qadGxi4+u2FolBY6wIPvE(HrHSj@RIN5PF0 zEIU`eg@W5DSV_U%6x>TeF$HTWSVuuA1sf>XOu;q^c2cm1g8dYnBYK2_M=5xmf)f-x zMZvQaygp6$)OZ;0+4irr=!)-lyP0Vg(;l@F@jfQ1BH6-%#)!1wT^o3sFQX z5DgK%i0Gw6D-*3oG)%N6(K_;>2Z)vseTZlo(M>U;TZ!%`IdXnffM4u;mis)&g zXNmri=<7t^BKi)|_lSN#^dq955dEC!mqfoN`Yq8Pi2h9UHww?AFqTZ=g%n;wVI>Ny zQdon+2!*vNtVdxAg^ej}M&V@?W>DCY!ZsAPr?3-+T`BBAVJ`~%QrMru0TfTTay?USV9G3}EN045rmSVk2BvHhGvy_wyvmfzTHM zY0H^*3)6No?GV!rGwrWT`+JOOUopKV(;G6qG1I#<{R*aE$@Ga#pU(7|Ouw7yYnfic z^rK8a$@Hh0{w33YVERwYXuyo-%t&L#)yx>qj1kO;GvhX9+`)`(%-GM2gUpDPGviHW zyv>YXm>FW`h0JWg%;wBYW9AjiyoQ-Wn7M$NH!|~PX6|I>A!Z(C<}1v6o0;!0E16l9 zm{o;Y>C9@!tPac?#;j~+to_V7!>l)$^%k>!W_B{OFJN{< zW~VVbgW1EFoz3hVX2+R*8?*0V_D*IWV)kKXzsKxPnEe@Z&STCc%&EwnZp`V&oc_$2 zEN0G3=FDc!8sLIt!9W%aW5IA1tYE<^7Tm*vBP=+7LI1&7#6Nz;VKs1!@^@Me42&NvhaNte#*kni7l$ZqIxW9z@jTzG=xP% zS+tZzD_C?Zi?*?7KZ_2s=p7b)#G=2kxGIZlv$!scd$D)`iwCiICW{xbcnOOiV)0fM zZ)fp2n}4wQ8WEd3)(Ut{Ty#42!Z=8ZLraqf;otTnOg ziH$k8B__6!*e+swi2X=W1&Wd>x`Lu>C>lc1I*K+^w3VWdDf)_{uZi~}K7jZj;`b9z z5Z^%jE8;&A|Ct;6bK|w#7`u)eZ{fzfxp6f&p5?~3xbaWi)PS3sb5j~OjpwGR+%%n= z4sz3D+;p6qFW}}X++2;DujS^E+?>VD4|4NHZr;qzf8plOxcLi~Ww5M0%Q~`b0kLH_ zvg~G-9b?(kEPIw^zp?y6mS4>BJ}e)^^1&=$$?{^BKfv-=SpGK4-(f`)R%EiGB`c=0 zVgV}_vEpG?9B0J|ZmG;IHMylWx8!omWNwLyxn&=>9Oaf{+zF+uZsNx7FgdM%>nf+a_|`bZ(o;ZQHr+0Jk0Dw(q$;iQ6l1dmnBe z#O;H*eI>US$GH6gZa>ZKuXFpG+|igjGPt7!cg*FE7xUvgU5qtYu9JYu;te$E^8;;!KJ=P~3^)c@!5>d?UptD1L$BQ`~fAq+`$u#C817%o{TsM{6Ze0>{hxFHx%K~n);!RK2fFdVavr#g2kz#9Kk>j{ zdEjH#UdGxstZm2I>8xGA+C{7_VeK~7?qKas*6w2MQPv(~?X#?XfweEO_6%!ZVePA| zeUG&tu=XR?#y(-~=dAsb2UB=(AP+9&!EHSF1`qy|lJh7zpOOnGxtNlQlvJUl2_>17 zw4|gRB^@d0LP>v0uA*cxB||6~M#)G@MpKeQ$rMV8D2Wp*xs8%yN*f0$+wjJK*>*({K`74yNGoG>uR#D0qYvEt|{x9v#urU 
z+OV!Y>pHQnrFcLSA7)2tRL@tSO zBqoqJxBp8Nl9)zfCW$#D=95@NVkwC@iJM8RAaNUsJ4vh}v6{ph5)Y6lA@LB2G7_6e zY$dUS#BLJ%NE{?_n8Z;MkCAwS#7Pp*ka#{u;uML~B+iofBZ=2ZyhY+267P}tfW${6 zJ|Xcri7!cfP2yV;KalvD#BY?HM_Dpu7gBZ!WtAwaN?8rcB9zsptR7`4lr^TT8D+7{ zD9fO%C1q_WYfo7x%DPh4gR)+f^`)#oWdkU?nzCyt8%o)5%0^N)nz9_q#!@z(vPqOh zDVs{!49aFxHjlD}lr5pGh_ag~JGcKYyOpv#D7%ZYdnkL5vUQXlr|db(UZCuK%08v+ zb2ePShAM2R#)eL8=*5OUY{+9n0UHY0a0?smX2WVW>|w(ZHXLQc`)v4>4WF~I5*s68 zHr8TeM>by0#@=i!WaAt*&ST?IHlAeT(`@{TjX$#SXEwEFQx`UMV^a>BCb6l2P4}?r zK{l;p(+M`cz@}4d`iafwvpK}(Og495b0;>RyI{3Bi_O_=Ud-lYY+k|U&1~Mo=6!5F z&F0tH{3ctH*;0uuRoK#wE#2ADlPzP}5@pL2wya^xLu^T~<#Dz=%a-Ta@>jO}oh@In zwHjONu=U*ikFD+4+MTUE*_y@He6~(t>q@p3v-JVCo?z<>Y(2%+kJ)<8$=7VF&9=sD zYs$9E*>)A%2C{83+h($DHrwuD+kCOK zvSS51Ze_;~b{u5K!(w)vX2y*gc5d*R%Tub}wZ2P3&IA?oI68&F;PIeuv#3 zvHNfAsmPvyJvG_Wi9Nm8(}z9h4jX$WvnR@)683Cj&ldK)#GY5#^E!JkVsAC})?jZJ z_V!_KKlV;y?+o_NVs8n1H?emMd&}ATCVSsz?|1COzVq038T;C>uO0ic**AgMzDev` z&At-$t!Llk?0c4d&$I7q_WjJhU)kS`{Vmzwn*GDspTqt<_OD?7D)!&Q{)6m)jQz*i z|0@SB;J`&3xRL`yI53n0OE|Ec1GjKsyO;w9IBw-4 zhvvjM^azKZrDVNM(*x<;ZA`jOWNij;!OzW{zy-$VVJG_ver!|KO2!Jkp&#0JdTay*anX6^|&Jl2=T zmhsq19$Up@FY?$QcMW=kWMa9xvkYXL$THkC$`20mqw*IiAMx2^^os z@fjT7&hY~rKg98mIsO&LzvhV+JkgOSI`hP0o>;~cD|q5KPdvvHFL0tFCjw5?P?>d3r~H>(=B6>`^ z4xWy!{C4Z63?FIxu!hVg6CTC++3cE@m!qePVn3dJa>xclZZWkG0$Jh^TT*Po9A;MHM}r{7w+VRHN0>?FZ_uY{>lp<^I|7n?8S?Hc(IfhxAEc* zPMy!GikzybRRDj^U|xl z^e!*G$IJD2xfw4v=jBPfJcE~K@$w#CKElgKdHDxUpU3I*Iei7Eui^9%PCv-$jWJGd z=JY=}{S&8u;Y<(C^ykb~oLR=1m7H0{nSGo&%9&%FInJ3UIrAcCUgFH(IP)!MzURyj zocWRR3n;&c@(Gkrro52yX_U{Td=BOFDUU6pd@1E|%5SE81?9I;%AcV8B<0Ug{ygQUC_hd4S<3%N`Rl~W z-=h2-%HN~>1Ij<5{1eJQ=j;H^Ud`ESIXjfI!#O*Wv!gki!`ZQ%9naZGoQ-mJDraYK zb~b0{{l6COJGv8s${zUVT9!pKNhXubWKt$QlQOy}Ac)dLQKTt?q6osG2m%(G$cW!r zkx_(2K@bE%nz)E4f*=;82&jvVOrJ@nWl|@-(svKP{k?zgIq%&2&im*6^WHs=u1n~; zl&)2DT}Ibxx~`z>D!RTy*R=||uBYopx^AZHR=RGd>n^%}Lf22}x{t2=>3Wc^hw1ta zUB9R6F}j|l>uI|FOxLq?{hhAo=-NuxcDj1$>Z5Cnt_jw3vSxqQ9Kf0bS#xlKH6LKj z2U+u7*4)CHAF<{R*4)LK`&jb}*1nFlC9GY@+H%$|W^Em78(8~s)}F!IGg*5!YtLov 
zMXbG;wZCHR6RiCsYoB55pIQ4HYuB*u&8#a_vaX7CwXCaST^s8)*=yy;y%S>n~^hH(CE})?dZ? zwXEMkVSO*_`&mE8hP~KuAR7*5!+Y59UN&6Ch99uuW;Wc)hTGWi8h4fr^K5vL4KK6t zjcnY5jYqNZ<7_;Mji<2jR5qT)#P7r)~Jw0qKXKMpno7mdQ);6}jjjemL z^&GZ-g{>>u`Zcz$V(S%by^^hcY@K533|r^f`U2Zt$F?`H?cHoUf^A2#?HIOwh;1KN zu?~pDLUvZMvxc3gu=8wop3BZJvhzH4u4Lz>?0krwPqOnV zcK(T-e_>}AI}2;+T|#d&y{+`N)7we!KJ>nW-m~ewnBGh1y^P+~^j<;lRrG#`-W%z? znciFJy`A2>=zWadetKuvRl}|pcD^g#7N3!d93cEhWu1~P*WOkj(uCv&6 z4!gd!fcV`u3ym0QwH3 z?-2Tqpzovfokrj174&_PzOT^tRr(6_eVx9`>ARY~Yv{XC62m1d+|3B&Pp?@3wgY@sFe~kV~`e*2Wg#iq_o`HX1 zU{40h7^r5Tg@Ly-P}qlo0~t7kfx{R$oPi@4IF5l67&wuElNdOKfzL8<0RvxS;7SJm zlY#Fu@B;>Z$iR;nxPyUv8TdH^zhvN74E&mb#~Jtw16>SkV_*jZ!xRQa8JJ*Tnt?e6 z{)fTWG5AIX|CPZvGg!`GErT5lu3&Iq2KQ(1T?`(?;GqnDkinxF{1Ag5Veq32eu}{} z8N86eFEhA`!OIw2&EORbUZr60dIoP~@MZ>YW$<oebU0 z&^-+OjGjiLhPoNr#?VfN`WPBxXp*5BhUOW1k>Lu4-_7s`89tBU zl?;E2;cFTG5yQ7L{8NVSWB7iC3lB2v{D$^ zi;)8vIhc_TFmfa#$1w6?MviCXWJXS9PVB{=D&SPX1BUdtVBO^C4awj8qGjb0j zKV#$7#UG8GRDX}BQG-Y3Zoc(J)=d8mM~hzXa%E{ zj5aX(Rz~+{^nHxJpV1F7`VmGy%IL=#{UoEGVf0K!&t~-VjDCsHs~CNd(I*%kVswfz z1;*aYSS4dsj5RUV%2+#ND;WDX#`b6IU5p*X*rANQm$6SVwwkf)82deAPczoVST|!s zjO}J@hOv3ZUS#YQ#xcGp<3)^@FkZ%Zp@Q*cj4x+=FUAjK{9wjE!1$4jAH(>E89$!! 
zlNmpi@zWVUgYmN%KacU1j9#|mA7Ol!@#h(TiSbvN2orB&VgVDSOe|tzF%xx6v@!8ECJtia5GIab;wUDL zW#Tv{PGI5`CQf7G(@cDpiO(@{J`-PM;&KHO*D`S(6SpvN8xwajaW@n9F!2Br4>9ow z6Tf5P4@~@tiM34ZV4|0a5hli&m||jt4# z6_`An$s?FNipgV{Jdw$hm^_8a)0q4;ldtjaX7X1|b~D+>SXG0rp{$*B~y1Y_1gVs>Jg@%VCwHo{gbIpOm#E0jj5eX^)WTZ z)Fe|gOwBX(BGa#Dx`gSaOt&)K#`N2m-iPV^m_C5%1DQUY=_8mvis@sSK91>=m_C!j z^!ZG$V*2Y$U&-{Dr!#vFvtMC$C9~gP_M6Opo7w+l_Pfmf zfY~21`=bK0cQE^7W`EA?Bg{U@>_3=&j@ix3_AtAh*u-bCb-?FgMTK zi_E>k{J&6`FJ-=p`Q^;FGyitx_ho*6=HJEqLCn9O`42LGH1i)~{v*tPlKHckKcD$k z%zvHvE1AEV`D>WJj`yis|(;kKQO3~p`bgUG8 zSc;C9qK`_^=~DC=DY`(4zAQzTNYSNIv`UIDlcLp9bcGb%C`C6*(XCQ+yA<6eML&_E zpGwhvQuJFX`n?oACPhz3(etw4jj~`5S@0$)EGUu%$H{_E$%4~l!KY=x8M5FGS?~*4 z@Jm_nD_QWcESQtx*GusmrTAZ^_)SuLtQ3DzicgW^)1>%xDZWRFAC}@rr1()Oeq4(G zD8+x4;%BA!?^3*0inl1Fc$XB9O7V+Q{IZlpO5PwPdq_!>4=GtKC09tvRRt-z zUP^A2l6$4(J}G%XN*5rwfTS|wdbhng_N$G@?z96M9 z$-=#5;UTi{J+kn9QdszYS@`^J(A!V<~q7|~}7+Lf=S#+-~dP)}kO&0w_7In#@ zb+TxuEZQZDW@XXyvgjpQ^s1Cc%HJU6dr0}4q2{8lO+m5Rru;*V1Cj8yzZD*h@JJyNk?6|=IqLKg2Yi;t4U=g8tKW%2#8c#SNcl*O;g60)R3mXyho3RzMqOR8nbGFj3f zOPXcL-m+w0S+c(@d6z6XP?j7lOWq?(-X}}WmL>lwOKy`TPbp-{4yk;jRIZT9eWh|g zsoY;G50%Qpr1HH|`C+O2h*bW&RDMP(&y>ourSkJq`6a2mKq|j1m8+%lTT*$YR9-EW z*GT1cQh9?^-XxWeOXV(Ex~DAdl%*#WWa)*n^oO$aE?Ih?Ed8x4eN>h{BTJu?rR!zs z4q4hOOGjksxGa5Ms*0p)kyI_0s<%tk2~zd%QuS%6`m$7AB2|}4)hemFOsZB()fG~8 zl~jF4s;-r)!jGluzohD3srtE8{ZguaB~`zcs^3c0qf+&_RBe%}VX1mms>`IhL8{*= z)kjP9SyKHqslHySZ7(MXGzGP`y>E2c&vf zs&`BEj8xA{_488mMyc6DYKo<1q12R1%@V0uCN&LG(m{^G&Jw zuGD-_YQ8TuKaiRqO3ja@=D(!oUa9%H)cjH*HGhhkztj#%?ens%R+jyn zEIVA5eMXjDA zcd68^lDf;J?z>WVt<+sFbvH`g%~E%()ZHs}_er7dVX6C#)cs!S9+SE!rS4g&`@7UV zCv|J3ZiCcqm%3r8o0qy5r0!LzkJP_G>i3lTBB?Kt`o&V;B=s#)-zN2)QvX(|f1A`F zB=v_#{h?C-9;rW8A@v`Y`V*!8B&k0|>Q9sUGo}7)ssFsxe@W_Zkow1^zEA3>q<%)~ z=cWEdseeTpNW%hYD3OLzX;>%?ZPKu>H0&o02S~$#(r}1093~A%Ny9PH@Y?)0oG1+^ zNy919aJDp@D-GvK!-djtu{2yN4XdQ#GHLj}H2g>!ekKh+mxc$W;bCd`jWqm08lI4b zr=+1v8n#Hopfn6i!?-j|NyDr(yeJK?NF!-1ERx0=X>6CqPHB9%G#)IC?~%s$N#h5k 
z@knVrMjAgXjmJylXQc5=X*^pR&y~h6NaK0Zc%d|2ER8Fr@oH)Oi8MYWjgL#?dTD$? znifb?r8Koj({d>^bx6}&q-if{+FP1VlBP4I>2uO_t~7m7n$DM|i==6#G<{2&zAa7H zNYi!Fbb~bABu%$V(_PYZw=~@&P3xp-RGQx?&BfAOD$R?ed9gG%Npp)d@1v0B{iOK- zX+BVz50U1>r1@}ZK0=yLl;)G9`4nkBO`1O~&7YO#&q?#S(tL?DUoXwKOY=k0{G2q8 zOY=+8vP4=|NXx;}a*VW`CM}mr%PMKPOj=e;%M}G_xl&plm6m6u<kP7s>Lk$nvks@`5b? zmMp(gmR~K)uaV`~$?{ud`Mt9IVOjo!EFYHTuho4)mcJ~^UzN5!q-{@WE0VSnDYUgp z+uqW)ue2Q^ZHGzQ;nH@5v>hdF$4c8Lr0ryBJ5}0Fm$uJH+nLgKiL`w~+OCqe8>H=i zX?s-K9+S2wrR`~H`?IwDx3mpN+l;i$N!tt3_Oi77k3!lb?G@7ADDBPCzFgWnr2Q?@ zzL&JWQ`+Ap?T1MFVbXrMw0}g}PmuOcNc+jseyX&8M%vGm_Oqq^E7JaTY5$J2UnA|` zm-Zh>`wyl4c4@y$+J7SL4;G~T_tO3sX@6GQ|0?Z&llINh-YxChq>FtEA9*nRKp}&MTzzTIsxAI&YND z+okg^>HLXw-Y=aGO6SAU`M7lcO*;P}on6woPCCb=b3!_&r1Q1ro|hGo6|a{S{~{~) zloe&N;@z_1P+4)dthhi{++^>p_>ru5T2^e372UF8=l}cs>kY5_KU{Lh|Kb0Stl0Ix Dyxl=H delta 25187 zcmZtMcX(B0wm^fr~+vwOvM@L704>Nb}+#%$gtt`b6W$s=5Iz__6h0h2 z5`HXvJbWsAI{b9_x$qyte+<7HelPrf_=E6=;g7-}hrbB_Is9$-yYNrppToZgf`s6* zpj=Qrs1ei*>ILY4!S%tQU__7^j0|$3!T4Z8FgchKObwO= zw+73D+k)GJ6~P_B%HYo6o?vZof3P9g8f*);2Rnj2!O7t9;8burcp^9xJQU&cBD=uInpqa z8fhG95@{7_+dk4ga!sU1q+jIv$PJMjBR5AzMsgzKBNHN%BU2(%BXc85BDY6YNA8Z? 
z8!3vck8F&@Bikao4kwknf&nmVuPDbD`JVAV~Im!my{O05=z*cu=Qx+WeN9O+H6?f%$qvTiX{%YAWwdk z52}1PwEy6q*)zwNRQbS^F%>HgnUa$|FFAW=a;pxl+b0h^+T*H(s+Tr(Q`K9l=2gDG z+^KT!R-Ih#Ox3Be#KOYVSmKgc;<8v`*;tw4SebWYiOXYUw#O20FIrwP;ia{&{GoAC z1FR+d<2PqIC{E9LQ+CB9*)HlU%%iAWjzsIR@gBfzBQKk#w)IH zrLwLH-yU9B*sdhJB78?I@y%G`TP0Vn2(OA=QW{Hqd(bU8gJ#aVG54R=;aTCevBX!3 z|2l-PR0L& z=nLUj3p?!yzZiZg{BrnwxHNnr{7NkGFR{e0V~O9y62FZleiuvpK9=~`9pTqB4!;q8 zQ)7+8@5B;+_$RL)V~M~2Q)CITP-t|Yp1q2`N=o>$(kJ21E>ie3miV`L`14rePyZwt z{VM!*VW)WbFR{d*uyizujCFlnE05n?ewZg}h?WzjiqY zt_aHhH;>?|SSaD&We5(|~t8vgC#8C1JC^RFv( zY0(>X5|W}xL2_ZIlJIXqoqwVeOD+Qcg&BIwr0m@BGyWams-R)e=s&!!j)gA!_pKnv z2pa#d-3gintqMDB37Q8jf|jw+6|vBju~6A98V7BHwilmz=&D$#{FsVGGm;Y)*6tj1 z{qJoLy2V1}{^Phu(DR}=8r*nY(6_Kt{K{dmOHRduez8!+|4N0Re__Wh!GK_3aAPb~ zDHf_43q}5IepN6exVf-h$(1=*j*ne(CU(h_CBd*@cq~*o7OGOTG$o;7&Cx;j{{f5* z#>GP6SSTnuo05=Hb7GMHe*gu+q*$n0EL446-GqiWO%E0ncG?-t2xbPeg4w~GU~Vuk zm>&z(jD>2&LP@bu?O3QzER-AzrNlyYcWN9gQeVTMFo*?9f`68|P`$#6g`;AjKCw_{ zEHp9}$|+h2;L0;I-*a` z_{lSGnQ+Va>9J7zSmHnaS-^vLfld!@rJUf?;Il%pP}f+f%SAO!kQ@AY?Gx`bFZ$-Hgv+n^SB$TMzZ4~uNeIf6 z1YZ}u*D#@WnQw#d)}Q5q97LJKW%Em(16;)`I zkkYO~BwW~OTcl#7QlxUEN~CHm)HfFD7Ykh<3*8V4_1_lW9En7#B~*>nh$Vgy3k`^c zZY&%f3k@!s*eGF8*}9SXg`MJ&da=;JqNf@qWR$%+l74X_EfyM7^lPJpde=9LwD|Ar zinNS{hWuA{McNd0EQz#@w2OsqiiL*#TPIgVI!3zu@3e??jdY8JhQ>lS7v*1_kW#Z( zU*VQ4Hg>faoOU*s?%k{Jt)j)k)S z!(nvf|Kx6DY-C(4G$t0x8s4X8uU0Dol$^cSU4IWTu?R?8uy=+tL!QPac0!D^p{kNv}udMdn8qL>9UtvM91R zp?_pf!hlF&(f4Ty1ELdRp-HjOltQls|1JE;(#W!l!q+esDu{)0|0Vmv|61TDkvj^< zY>BLl+!W^%X>?rDF${i^h(m0`B^c~9mk#g@*?gPsGM7h6H?pMnFM)?HFUrPDQD1Rm8%Tc}}<*QIW zpnPM>_oaLe<)=~pcFG^8{EL+TiVBxgp)M8LQ=tPDdQ+hf6$VjZ2+;~dsW6NRBd9Qr z3gf9TkqY@#m`sJ8R4ApwS5%Bpu|5?WQn4)++f%V46+2V0D;2v_u?H1kiVE@wrV-2|SVFL#;6Z}p1ZN4JCOAj%Ji#9bJ|Vam_*a78 zh#+zakxPl(L}WCPd?J&G%p@|K$Xp`xiNuI3C2}i~+lZ`*5?M#&IFTobyhr3SB3}^s zipbYQekJl7)yh(>9MvjNtrFF$P%TWg2-WISts&JKQ7w&X8B}XRwPsWsO10@!yPs;O zsP-P!FQs}pswY$^n%X>}PP7HpTT#6Y)!R|MJJowoJ(KD=RG&cgLaMKz`bw(rp!xx- 
zAENqORR56bA5$ZN8dp-|Dr%%sqZu_?P-7}J=22q-HBM0DDQY}J%@8#!5Up8>ni_E*<)a*sg-qg&d<~VAOr{-j8&Yf|`G$ z=Fil+gj$zUD@3i!sdXi_s!%ITt%#^v)u~mJTK%Xsn_Anb^$NAVqSoK3^((c0Cn6NoPrVnxtn*dXA(Qs9lHJji}w4+HI-biP~ML z-HqD4sNI{|eW-mSwTDxC2DJ;Ry@cA)rPN+V?YpVHmfD-CT}yR6bA~!EQ>Tbymrcc}9LIE z(c}b@%aB}=H7R$IvX+$lNGTy@D=7y_IZVpKq#PyX7%9g|IZ4WMq`W}NOBbD- zC*=YuuafdQDZf&;8g;u;cRY1(rS5L(9;WWY)P0(|&rb^kTm#BN5x)-SXDs^9{ z?t9ezfV%&nUWj^^Q?DuY+EK3q^=_tK7WJZIskexF%c*xe^$t<*IQ33a?;q3;QU7x4 zUrqfM)Ne)o0n{Hx{Snm9r2a(e=TSeO`UTXVNBsrVUrznosecFc@1*`}>fc5E2dKY^ z`VSJVAE*8n>Tjq1>oiE9K{FbRp}`s&?4iMYB^G?k{Xa&jno>XrjS~X z)CQ!clG-#%>JU;#k~){v7^zE1T~6xlq~1qr5vf~A-A?LGQg@TOm(=~F9w7B3si#PN zg48ETeTvj)NIggDUubkCjhfTwW*RM`(Iy&gr_oLtU9pQshiG)9=+3qY&qp65El%1N z(zcPdle9gg?I-OZX-7yqO4?(joh0ovX-|^&G->BZdx5lAZOWFsd zeN5V?qXgQRyQ|A4B?B z(#MlNk@S4hCzC#n^qHj3A$>mSi%2gdeJSb7Nnb(wouscJ{T|ZqBYi#T50Ji@^f>8T zN#8;GZqoOWet`7Dq(4IXu_);$NIymT8PcC3{aMnVC;cVTOG$r~^fyR0gun9qB)i{uAlHkp3GP31nPK#${w&Nk%y`Dw0u!jDU=2buwy^QHPAW zWHcb75gF-ZG$Ero8Lh}@OGXDWI+M|jj2>iMOGY0ut|#LrGHxbg9vMr>SVqQ1GPaSi zgN!H0c#e!0$oP(oU�e(Z)4tT%X1bX?zWh`_cFY8s9?W=`@~6CTM(?8SnuQdIUW=S+_NV7&XyM|`{Xm$h53TQTqW^-t^ zj%E+iEKajiG<%k2&(Z7$n*By|G_N0}d1IP4rTHM5XVQEW&6m@B4bAVS`F@%orTHlX!QqL{fSne(z+6@YtXtDty|E# zBdt5rx`5WRXg!D4_tUzB)>~=)G_7Bzbt$dCp!N5({(&~NX_HEut7+4nHhpPxJ#EI( zrhqn+MYXw|Hh0rzEp3j|<}7WVrp(D+f2J+ z+MTA|Iodr>yPs))3GFYXeIwd8r+rJ>52pP{+K;CF0@^R5{c_qLrTuBzpP~I{wEvd& z-_xN!9U9Z2DIKCW(P1sk z104s@aV{NWbX-cu{d7D^$76JSi;f@C@nbrbr&EMZ)#=ooPJM}Xx}Hwc=ro^B3+Yrs zr#*DqN2izR^d_C&rgL>V*QIlPI*+4s0i7q)c|D!ublyVer|A3=ozK(xJ39YD=YP