qtopia-4.2.2-add-avr32-arch.patch 199 KB

  1. diff -Nupr a/include/Qt/qatomic_avr32.h b/include/Qt/qatomic_avr32.h
  2. --- a/include/Qt/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
  3. +++ b/include/Qt/qatomic_avr32.h 2006-07-27 07:55:09.000000000 +0200
  4. @@ -0,0 +1 @@
  5. +#include "../../src/corelib/arch/qatomic_avr32.h"
  6. diff -Nupr a/include/QtCore/qatomic_avr32.h b/include/QtCore/qatomic_avr32.h
  7. --- a/include/QtCore/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
  8. +++ b/include/QtCore/qatomic_avr32.h 2006-07-27 07:55:28.000000000 +0200
  9. @@ -0,0 +1 @@
  10. +#include "../../src/corelib/arch/qatomic_avr32.h"
  11. diff -Nupr a/src/corelib/arch/arch.pri b/src/corelib/arch/arch.pri
  12. --- a/src/corelib/arch/arch.pri 2006-06-30 09:49:44.000000000 +0200
  13. +++ b/src/corelib/arch/arch.pri 2006-07-26 11:03:43.000000000 +0200
  14. @@ -13,6 +13,7 @@ mac:HEADERS += arch/qatomic_macosx.h \
  15. arch/qatomic_generic.h \
  16. arch/qatomic_powerpc.h \
  17. arch/qatomic_arm.h \
  18. + arch/qatomic_avr32.h \
  19. arch/qatomic_i386.h \
  20. arch/qatomic_mips.h \
  21. arch/qatomic_s390.h \
  22. diff -Nupr a/src/corelib/arch/avr32/arch.pri b/src/corelib/arch/avr32/arch.pri
  23. --- a/src/corelib/arch/avr32/arch.pri 1970-01-01 01:00:00.000000000 +0100
  24. +++ b/src/corelib/arch/avr32/arch.pri 2006-07-26 11:02:16.000000000 +0200
  25. @@ -0,0 +1,5 @@
  26. +#
  27. +# AVR32 architecture
  28. +#
  29. +SOURCES += $$QT_ARCH_CPP/qatomic.cpp \
  30. + $$QT_ARCH_CPP/malloc.c
  31. diff -Nupr a/src/corelib/arch/avr32/malloc.c b/src/corelib/arch/avr32/malloc.c
  32. --- a/src/corelib/arch/avr32/malloc.c 1970-01-01 01:00:00.000000000 +0100
  33. +++ b/src/corelib/arch/avr32/malloc.c 2006-07-28 10:29:44.000000000 +0200
  34. @@ -0,0 +1,5819 @@
  35. +/****************************************************************************
  36. +**
  37. +** This file is part of the QtCore module of the Qt Toolkit.
  38. +**
  39. +** This file contains third party code which is not governed by the Qt
  40. +** Commercial License Agreement. Please read the license headers below
  41. +** for more information.
  42. +**
  43. +** Further information about Qt licensing is available at:
  44. +** http://www.trolltech.com/products/qt/licensing.html or by
  45. +** contacting info@trolltech.com.
  46. +**
  47. +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
  48. +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  49. +**
  50. +****************************************************************************/
  51. +
  52. +/* ---- config.h */
  53. +#define KDE_MALLOC
  54. +#define KDE_MALLOC_FULL
  55. +#define KDE_MALLOC_AVR32
  56. +/* ---- */
  57. +
  58. +#ifdef KDE_MALLOC
  59. +
  60. +#ifdef KDE_MALLOC_DEBUG
  61. +#define DEBUG
  62. +#endif
  63. +
  64. +#define USE_MALLOC_LOCK
  65. +#define INLINE __inline__
  66. +/*#define INLINE*/
  67. +#define USE_MEMCPY 0
  68. +#define MMAP_CLEARS 1
  69. +
  70. +/*
  71. + This is a version (aka dlmalloc) of malloc/free/realloc written by
  72. + Doug Lea and released to the public domain. Use, modify, and
  73. + redistribute this code without permission or acknowledgment in any
  74. + way you wish. Send questions, comments, complaints, performance
  75. + data, etc to dl@cs.oswego.edu
  76. +
  77. +* VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
  78. +
  79. + Note: There may be an updated version of this malloc obtainable at
  80. + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  81. + Check before installing!
  82. +
  83. +* Quickstart
  84. +
  85. + This library is all in one file to simplify the most common usage:
  86. + ftp it, compile it (-O), and link it into another program. All
  87. + of the compile-time options default to reasonable values for use on
  88. + most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
  89. + You might later want to step through various compile-time and dynamic
  90. + tuning options.
  91. +
  92. + For convenience, an include file for code using this malloc is at:
  93. + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.0.h
  94. + You don't really need this .h file unless you call functions not
  95. + defined in your system include files. The .h file contains only the
  96. + excerpts from this file needed for using this malloc on ANSI C/C++
  97. + systems, so long as you haven't changed compile-time options about
  98. + naming and tuning parameters. If you do, then you can create your
  99. + own malloc.h that does include all settings by cutting at the point
  100. + indicated below.
  101. +
  102. +* Why use this malloc?
  103. +
  104. + This is not the fastest, most space-conserving, most portable, or
  105. + most tunable malloc ever written. However it is among the fastest
  106. + while also being among the most space-conserving, portable and tunable.
  107. + Consistent balance across these factors results in a good general-purpose
  108. + allocator for malloc-intensive programs.
  109. +
  110. + The main properties of the algorithms are:
  111. + * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
  112. + with ties normally decided via FIFO (i.e. least recently used).
  113. + * For small (<= 64 bytes by default) requests, it is a caching
  114. + allocator, that maintains pools of quickly recycled chunks.
  115. + * In between, and for combinations of large and small requests, it does
  116. + the best it can trying to meet both goals at once.
  117. + * For very large requests (>= 128KB by default), it relies on system
  118. + memory mapping facilities, if supported.
  119. +
  120. + For a longer but slightly out of date high-level description, see
  121. + http://gee.cs.oswego.edu/dl/html/malloc.html
  122. +
  123. + You may already by default be using a C library containing a malloc
  124. + that is based on some version of this malloc (for example in
  125. + linux). You might still want to use the one in this file in order to
  126. + customize settings or to avoid overheads associated with library
  127. + versions.
  128. +
  129. +* Contents, described in more detail in "description of public routines" below.
  130. +
  131. + Standard (ANSI/SVID/...) functions:
  132. + malloc(size_t n);
  133. + calloc(size_t n_elements, size_t element_size);
  134. + free(Void_t* p);
  135. + realloc(Void_t* p, size_t n);
  136. + memalign(size_t alignment, size_t n);
  137. + valloc(size_t n);
  138. + mallinfo()
  139. + mallopt(int parameter_number, int parameter_value)
  140. +
  141. + Additional functions:
  142. + independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
  143. + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
  144. + pvalloc(size_t n);
  145. + cfree(Void_t* p);
  146. + malloc_trim(size_t pad);
  147. + malloc_usable_size(Void_t* p);
  148. + malloc_stats();
  149. +
  150. +* Vital statistics:
  151. +
  152. + Supported pointer representation: 4 or 8 bytes
  153. + Supported size_t representation: 4 or 8 bytes
  154. + Note that size_t is allowed to be 4 bytes even if pointers are 8.
  155. + You can adjust this by defining INTERNAL_SIZE_T
  156. +
  157. + Alignment: 2 * sizeof(size_t) (default)
  158. + (i.e., 8 byte alignment with 4byte size_t). This suffices for
  159. + nearly all current machines and C compilers. However, you can
  160. + define MALLOC_ALIGNMENT to be wider than this if necessary.
  161. +
  162. + Minimum overhead per allocated chunk: 4 or 8 bytes
  163. + Each malloced chunk has a hidden word of overhead holding size
  164. + and status information.
  165. +
  166. + Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
  167. + 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
  168. +
  169. + When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
  170. + ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
  171. + needed; 4 (8) for a trailing size field and 8 (16) bytes for
  172. + free list pointers. Thus, the minimum allocatable size is
  173. + 16/24/32 bytes.
  174. +
  175. + Even a request for zero bytes (i.e., malloc(0)) returns a
  176. + pointer to something of the minimum allocatable size.
  177. +
  178. + The maximum overhead wastage (i.e., number of extra bytes
  179. + allocated than were requested in malloc) is less than or equal
  180. + to the minimum size, except for requests >= mmap_threshold that
  181. + are serviced via mmap(), where the worst case wastage is 2 *
  182. + sizeof(size_t) bytes plus the remainder from a system page (the
  183. + minimal mmap unit); typically 4096 or 8192 bytes.
  184. +
  185. + Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
  186. + 8-byte size_t: 2^64 minus about two pages
  187. +
  188. + It is assumed that (possibly signed) size_t values suffice to
  189. + represent chunk sizes. `Possibly signed' is due to the fact
  190. + that `size_t' may be defined on a system as either a signed or
  191. + an unsigned type. The ISO C standard says that it must be
  192. + unsigned, but a few systems are known not to adhere to this.
  193. + Additionally, even when size_t is unsigned, sbrk (which is by
  194. + default used to obtain memory from system) accepts signed
  195. + arguments, and may not be able to handle size_t-wide arguments
  196. + with negative sign bit. Generally, values that would
  197. + appear as negative after accounting for overhead and alignment
  198. + are supported only via mmap(), which does not have this
  199. + limitation.
  200. +
  201. + Requests for sizes outside the allowed range will perform an optional
  202. + failure action and then return null. (Requests may also
  203. + also fail because a system is out of memory.)
  204. +
  205. + Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
  206. +
  207. + When USE_MALLOC_LOCK is defined, wrappers are created to
  208. + surround every public call with either a pthread mutex or
  209. + a win32 spinlock (depending on WIN32). This is not
  210. + especially fast, and can be a major bottleneck.
  211. + It is designed only to provide minimal protection
  212. + in concurrent environments, and to provide a basis for
  213. + extensions. If you are using malloc in a concurrent program,
  214. + you would be far better off obtaining ptmalloc, which is
  215. + derived from a version of this malloc, and is well-tuned for
  216. + concurrent programs. (See http://www.malloc.de)
  217. +
  218. + Compliance: I believe it is compliant with the 1997 Single Unix Specification
  219. + (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
  220. + others as well.
  221. +
  222. +* Synopsis of compile-time options:
  223. +
  224. + People have reported using previous versions of this malloc on all
  225. + versions of Unix, sometimes by tweaking some of the defines
  226. + below. It has been tested most extensively on Solaris and
  227. + Linux. It is also reported to work on WIN32 platforms.
  228. + People also report using it in stand-alone embedded systems.
  229. +
  230. + The implementation is in straight, hand-tuned ANSI C. It is not
  231. + at all modular. (Sorry!) It uses a lot of macros. To be at all
  232. + usable, this code should be compiled using an optimizing compiler
  233. + (for example gcc -O3) that can simplify expressions and control
  234. + paths. (FAQ: some macros import variables as arguments rather than
  235. + declare locals because people reported that some debuggers
  236. + otherwise get confused.)
  237. +
  238. + OPTION DEFAULT VALUE
  239. +
  240. + Compilation Environment options:
  241. +
  242. + __STD_C derived from C compiler defines
  243. + WIN32 NOT defined
  244. + HAVE_MEMCPY defined
  245. + USE_MEMCPY 1 if HAVE_MEMCPY is defined
  246. + HAVE_MMAP defined as 1
  247. + MMAP_CLEARS 1
  248. + HAVE_MREMAP 0 unless linux defined
  249. + malloc_getpagesize derived from system #includes, or 4096 if not
  250. + HAVE_USR_INCLUDE_MALLOC_H NOT defined
  251. + LACKS_UNISTD_H NOT defined unless WIN32
  252. + LACKS_SYS_PARAM_H NOT defined unless WIN32
  253. + LACKS_SYS_MMAN_H NOT defined unless WIN32
  254. +
  255. + Changing default word sizes:
  256. +
  257. + INTERNAL_SIZE_T size_t
  258. + MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
  259. +
  260. + Configuration and functionality options:
  261. +
  262. + USE_DL_PREFIX NOT defined
  263. + USE_PUBLIC_MALLOC_WRAPPERS NOT defined
  264. + USE_MALLOC_LOCK NOT defined
  265. + DEBUG NOT defined
  266. + REALLOC_ZERO_BYTES_FREES NOT defined
  267. + MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
  268. + TRIM_FASTBINS 0
  269. +
  270. + Options for customizing MORECORE:
  271. +
  272. + MORECORE sbrk
  273. + MORECORE_CONTIGUOUS 1
  274. + MORECORE_CANNOT_TRIM NOT defined
  275. + MMAP_AS_MORECORE_SIZE (1024 * 1024)
  276. +
  277. + Tuning options that are also dynamically changeable via mallopt:
  278. +
  279. + DEFAULT_MXFAST 64
  280. + DEFAULT_TRIM_THRESHOLD 128 * 1024
  281. + DEFAULT_TOP_PAD 0
  282. + DEFAULT_MMAP_THRESHOLD 128 * 1024
  283. + DEFAULT_MMAP_MAX 65536
  284. +
  285. + There are several other #defined constants and macros that you
  286. + probably don't want to touch unless you are extending or adapting malloc.
  287. +*/
  288. +
  289. +/*
  290. + WIN32 sets up defaults for MS environment and compilers.
  291. + Otherwise defaults are for unix.
  292. +*/
  293. +
  294. +/* #define WIN32 */
  295. +
  296. +#ifdef WIN32
  297. +
  298. +#define WIN32_LEAN_AND_MEAN
  299. +#include <windows.h>
  300. +
  301. +/* Win32 doesn't supply or need the following headers */
  302. +#define LACKS_UNISTD_H
  303. +#define LACKS_SYS_PARAM_H
  304. +#define LACKS_SYS_MMAN_H
  305. +
  306. +/* Use the supplied emulation of sbrk */
  307. +#define MORECORE sbrk
  308. +#define MORECORE_CONTIGUOUS 1
  309. +#define MORECORE_FAILURE ((void*)(-1))
  310. +
  311. +/* Use the supplied emulation of mmap and munmap */
  312. +#define HAVE_MMAP 1
  313. +#define MUNMAP_FAILURE (-1)
  314. +#define MMAP_CLEARS 1
  315. +
  316. +/* These values don't really matter in windows mmap emulation */
  317. +#define MAP_PRIVATE 1
  318. +#define MAP_ANONYMOUS 2
  319. +#define PROT_READ 1
  320. +#define PROT_WRITE 2
  321. +
  322. +/* Emulation functions defined at the end of this file */
  323. +
  324. +/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
  325. +#ifdef USE_MALLOC_LOCK
  326. +static int slwait(int *sl);
  327. +static int slrelease(int *sl);
  328. +#endif
  329. +
  330. +static long getpagesize(void);
  331. +static long getregionsize(void);
  332. +static void *sbrk(long size);
  333. +static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
  334. +static long munmap(void *ptr, long size);
  335. +
  336. +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed);
  337. +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user);
  338. +
  339. +#endif
  340. +
  341. +/*
  342. + __STD_C should be nonzero if using ANSI-standard C compiler, a C++
  343. + compiler, or a C compiler sufficiently close to ANSI to get away
  344. + with it.
  345. +*/
  346. +
  347. +#ifndef __STD_C
  348. +#if defined(__STDC__) || defined(_cplusplus)
  349. +#define __STD_C 1
  350. +#else
  351. +#define __STD_C 0
  352. +#endif
  353. +#endif /*__STD_C*/
  354. +
  355. +
  356. +/*
  357. + Void_t* is the pointer type that malloc should say it returns
  358. +*/
  359. +
  360. +#ifndef Void_t
  361. +#if (__STD_C || defined(WIN32))
  362. +#define Void_t void
  363. +#else
  364. +#define Void_t char
  365. +#endif
  366. +#endif /*Void_t*/
  367. +
  368. +#if __STD_C
  369. +#include <stddef.h> /* for size_t */
  370. +#else
  371. +#include <sys/types.h>
  372. +#endif
  373. +
  374. +#ifdef __cplusplus
  375. +extern "C" {
  376. +#endif
  377. +
  378. +/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
  379. +
  380. +/* #define LACKS_UNISTD_H */
  381. +
  382. +#ifndef LACKS_UNISTD_H
  383. +#include <unistd.h>
  384. +#endif
  385. +
  386. +/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
  387. +
  388. +/* #define LACKS_SYS_PARAM_H */
  389. +
  390. +
  391. +#include <stdio.h> /* needed for malloc_stats */
  392. +#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
  393. +
  394. +
  395. +/*
  396. + Debugging:
  397. +
  398. + Because freed chunks may be overwritten with bookkeeping fields, this
  399. + malloc will often die when freed memory is overwritten by user
  400. + programs. This can be very effective (albeit in an annoying way)
  401. + in helping track down dangling pointers.
  402. +
  403. + If you compile with -DDEBUG, a number of assertion checks are
  404. + enabled that will catch more memory errors. You probably won't be
  405. + able to make much sense of the actual assertion errors, but they
  406. + should help you locate incorrectly overwritten memory. The
  407. + checking is fairly extensive, and will slow down execution
  408. + noticeably. Calling malloc_stats or mallinfo with DEBUG set will
  409. + attempt to check every non-mmapped allocated and free chunk in the
  410. + course of computing the summmaries. (By nature, mmapped regions
  411. + cannot be checked very much automatically.)
  412. +
  413. + Setting DEBUG may also be helpful if you are trying to modify
  414. + this code. The assertions in the check routines spell out in more
  415. + detail the assumptions and invariants underlying the algorithms.
  416. +
  417. + Setting DEBUG does NOT provide an automated mechanism for checking
  418. + that all accesses to malloced memory stay within their
  419. + bounds. However, there are several add-ons and adaptations of this
  420. + or other mallocs available that do this.
  421. +*/
  422. +
  423. +#ifdef DEBUG
  424. +#include <assert.h>
  425. +#else
  426. +#define assert(x) ((void)0)
  427. +#endif
  428. +
  429. +
  430. +/*
  431. + INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  432. + of chunk sizes.
  433. +
  434. + The default version is the same as size_t.
  435. +
  436. + While not strictly necessary, it is best to define this as an
  437. + unsigned type, even if size_t is a signed type. This may avoid some
  438. + artificial size limitations on some systems.
  439. +
  440. + On a 64-bit machine, you may be able to reduce malloc overhead by
  441. + defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
  442. + expense of not being able to handle more than 2^32 of malloced
  443. + space. If this limitation is acceptable, you are encouraged to set
  444. + this unless you are on a platform requiring 16byte alignments. In
  445. + this case the alignment requirements turn out to negate any
  446. + potential advantages of decreasing size_t word size.
  447. +
  448. + Implementors: Beware of the possible combinations of:
  449. + - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
  450. + and might be the same width as int or as long
  451. + - size_t might have different width and signedness as INTERNAL_SIZE_T
  452. + - int and long might be 32 or 64 bits, and might be the same width
  453. + To deal with this, most comparisons and difference computations
  454. + among INTERNAL_SIZE_Ts should cast them to unsigned long, being
  455. + aware of the fact that casting an unsigned int to a wider long does
  456. + not sign-extend. (This also makes checking for negative numbers
  457. + awkward.) Some of these casts result in harmless compiler warnings
  458. + on some systems.
  459. +*/
  460. +
  461. +#ifndef INTERNAL_SIZE_T
  462. +#define INTERNAL_SIZE_T size_t
  463. +#endif
  464. +
  465. +/* The corresponding word size */
  466. +#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
  467. +
  468. +
  469. +/*
  470. + MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  471. + It must be a power of two at least 2 * SIZE_SZ, even on machines
  472. + for which smaller alignments would suffice. It may be defined as
  473. + larger than this though. Note however that code and data structures
  474. + are optimized for the case of 8-byte alignment.
  475. +*/
  476. +
  477. +
  478. +#ifndef MALLOC_ALIGNMENT
  479. +#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
  480. +#endif
  481. +
  482. +/* The corresponding bit mask value */
  483. +#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
  484. +
  485. +
  486. +
  487. +/*
  488. + REALLOC_ZERO_BYTES_FREES should be set if a call to
  489. + realloc with zero bytes should be the same as a call to free.
  490. + Some people think it should. Otherwise, since this malloc
  491. + returns a unique pointer for malloc(0), so does realloc(p, 0).
  492. +*/
  493. +
  494. +/* #define REALLOC_ZERO_BYTES_FREES */
  495. +
  496. +/*
  497. + TRIM_FASTBINS controls whether free() of a very small chunk can
  498. + immediately lead to trimming. Setting to true (1) can reduce memory
  499. + footprint, but will almost always slow down programs that use a lot
  500. + of small chunks.
  501. +
  502. + Define this only if you are willing to give up some speed to more
  503. + aggressively reduce system-level memory footprint when releasing
  504. + memory in programs that use many small chunks. You can get
  505. + essentially the same effect by setting MXFAST to 0, but this can
  506. + lead to even greater slowdowns in programs using many small chunks.
  507. + TRIM_FASTBINS is an in-between compile-time option, that disables
  508. + only those chunks bordering topmost memory from being placed in
  509. + fastbins.
  510. +*/
  511. +
  512. +#ifndef TRIM_FASTBINS
  513. +#define TRIM_FASTBINS 0
  514. +#endif
  515. +
  516. +
  517. +/*
  518. + USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  519. + This is necessary when you only want to use this malloc in one part
  520. + of a program, using your regular system malloc elsewhere.
  521. +*/
  522. +
  523. +/* #define USE_DL_PREFIX */
  524. +
  525. +
  526. +/*
  527. + USE_MALLOC_LOCK causes wrapper functions to surround each
  528. + callable routine with pthread mutex lock/unlock.
  529. +
  530. + USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
  531. +*/
  532. +
  533. +
  534. +/* #define USE_MALLOC_LOCK */
  535. +
  536. +
  537. +/*
  538. + If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
  539. + actually a wrapper function that first calls MALLOC_PREACTION, then
  540. + calls the internal routine, and follows it with
  541. + MALLOC_POSTACTION. This is needed for locking, but you can also use
  542. + this, without USE_MALLOC_LOCK, for purposes of interception,
  543. + instrumentation, etc. It is a sad fact that using wrappers often
  544. + noticeably degrades performance of malloc-intensive programs.
  545. +*/
  546. +
  547. +#ifdef USE_MALLOC_LOCK
  548. +#define USE_PUBLIC_MALLOC_WRAPPERS
  549. +#else
  550. +/* #define USE_PUBLIC_MALLOC_WRAPPERS */
  551. +#endif
  552. +
  553. +
  554. +/*
  555. + Two-phase name translation.
  556. + All of the actual routines are given mangled names.
  557. + When wrappers are used, they become the public callable versions.
  558. + When DL_PREFIX is used, the callable names are prefixed.
  559. +*/
  560. +
  561. +#ifndef USE_PUBLIC_MALLOC_WRAPPERS
  562. +#define cALLOc public_cALLOc
  563. +#define fREe public_fREe
  564. +#define cFREe public_cFREe
  565. +#define mALLOc public_mALLOc
  566. +#define mEMALIGn public_mEMALIGn
  567. +#define rEALLOc public_rEALLOc
  568. +#define vALLOc public_vALLOc
  569. +#define pVALLOc public_pVALLOc
  570. +#define mALLINFo public_mALLINFo
  571. +#define mALLOPt public_mALLOPt
  572. +#define mTRIm public_mTRIm
  573. +#define mSTATs public_mSTATs
  574. +#define mUSABLe public_mUSABLe
  575. +#define iCALLOc public_iCALLOc
  576. +#define iCOMALLOc public_iCOMALLOc
  577. +#endif
  578. +
  579. +#ifdef USE_DL_PREFIX
  580. +#define public_cALLOc dlcalloc
  581. +#define public_fREe dlfree
  582. +#define public_cFREe dlcfree
  583. +#define public_mALLOc dlmalloc
  584. +#define public_mEMALIGn dlmemalign
  585. +#define public_rEALLOc dlrealloc
  586. +#define public_vALLOc dlvalloc
  587. +#define public_pVALLOc dlpvalloc
  588. +#define public_mALLINFo dlmallinfo
  589. +#define public_mALLOPt dlmallopt
  590. +#define public_mTRIm dlmalloc_trim
  591. +#define public_mSTATs dlmalloc_stats
  592. +#define public_mUSABLe dlmalloc_usable_size
  593. +#define public_iCALLOc dlindependent_calloc
  594. +#define public_iCOMALLOc dlindependent_comalloc
  595. +#else /* USE_DL_PREFIX */
  596. +#define public_cALLOc calloc
  597. +#define public_fREe free
  598. +#define public_cFREe cfree
  599. +#define public_mALLOc malloc
  600. +#define public_mEMALIGn memalign
  601. +#define public_rEALLOc realloc
  602. +#define public_vALLOc valloc
  603. +#define public_pVALLOc pvalloc
  604. +#define public_mALLINFo mallinfo
  605. +#define public_mALLOPt mallopt
  606. +#define public_mTRIm malloc_trim
  607. +#define public_mSTATs malloc_stats
  608. +#define public_mUSABLe malloc_usable_size
  609. +#define public_iCALLOc independent_calloc
  610. +#define public_iCOMALLOc independent_comalloc
  611. +#endif /* USE_DL_PREFIX */
  612. +
  613. +
  614. +/*
  615. + HAVE_MEMCPY should be defined if you are not otherwise using
  616. + ANSI STD C, but still have memcpy and memset in your C library
  617. + and want to use them in calloc and realloc. Otherwise simple
  618. + macro versions are defined below.
  619. +
  620. + USE_MEMCPY should be defined as 1 if you actually want to
  621. + have memset and memcpy called. People report that the macro
  622. + versions are faster than libc versions on some systems.
  623. +
  624. + Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
  625. + (of <= 36 bytes) are manually unrolled in realloc and calloc.
  626. +*/
  627. +
  628. +/* If it's available it's defined in config.h. */
  629. +/* #define HAVE_MEMCPY */
  630. +
  631. +#ifndef USE_MEMCPY
  632. +#ifdef HAVE_MEMCPY
  633. +#define USE_MEMCPY 1
  634. +#else
  635. +#define USE_MEMCPY 0
  636. +#endif
  637. +#endif
  638. +
  639. +
  640. +#if (__STD_C || defined(HAVE_MEMCPY))
  641. +
  642. +#ifdef WIN32
  643. +/* On Win32 memset and memcpy are already declared in windows.h */
  644. +#else
  645. +#if __STD_C
  646. +void* memset(void*, int, size_t);
  647. +void* memcpy(void*, const void*, size_t);
  648. +#else
  649. +Void_t* memset();
  650. +Void_t* memcpy();
  651. +#endif
  652. +#endif
  653. +#endif
  654. +
  655. +/*
  656. + MALLOC_FAILURE_ACTION is the action to take before "return 0" when
  657. + malloc fails to be able to return memory, either because memory is
  658. + exhausted or because of illegal arguments.
  659. +
  660. + By default, sets errno if running on STD_C platform, else does nothing.
  661. +*/
  662. +
  663. +#ifndef MALLOC_FAILURE_ACTION
  664. +#if __STD_C
  665. +#define MALLOC_FAILURE_ACTION \
  666. + errno = ENOMEM;
  667. +
  668. +#else
  669. +#define MALLOC_FAILURE_ACTION
  670. +#endif
  671. +#endif
  672. +
  673. +/*
  674. + MORECORE-related declarations. By default, rely on sbrk
  675. +*/
  676. +
  677. +
  678. +#ifdef LACKS_UNISTD_H
  679. +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  680. +#if __STD_C
  681. +extern Void_t* sbrk(ptrdiff_t);
  682. +#else
  683. +extern Void_t* sbrk();
  684. +#endif
  685. +#endif
  686. +#endif
  687. +
  688. +/*
  689. + MORECORE is the name of the routine to call to obtain more memory
  690. + from the system. See below for general guidance on writing
  691. + alternative MORECORE functions, as well as a version for WIN32 and a
  692. + sample version for pre-OSX macos.
  693. +*/
  694. +
  695. +#ifndef MORECORE
  696. +#define MORECORE sbrk
  697. +#endif
  698. +
  699. +/*
  700. + MORECORE_FAILURE is the value returned upon failure of MORECORE
  701. + as well as mmap. Since it cannot be an otherwise valid memory address,
  702. + and must reflect values of standard sys calls, you probably ought not
  703. + try to redefine it.
  704. +*/
  705. +
  706. +#ifndef MORECORE_FAILURE
  707. +#define MORECORE_FAILURE (-1)
  708. +#endif
  709. +
  710. +/*
  711. + If MORECORE_CONTIGUOUS is true, take advantage of fact that
  712. + consecutive calls to MORECORE with positive arguments always return
  713. + contiguous increasing addresses. This is true of unix sbrk. Even
  714. + if not defined, when regions happen to be contiguous, malloc will
  715. + permit allocations spanning regions obtained from different
  716. + calls. But defining this when applicable enables some stronger
  717. + consistency checks and space efficiencies.
  718. +*/
  719. +
  720. +#ifndef MORECORE_CONTIGUOUS
  721. +#define MORECORE_CONTIGUOUS 1
  722. +#endif
  723. +
  724. +/*
  725. + Define MORECORE_CANNOT_TRIM if your version of MORECORE
  726. + cannot release space back to the system when given negative
  727. + arguments. This is generally necessary only if you are using
  728. + a hand-crafted MORECORE function that cannot handle negative arguments.
  729. +*/
  730. +
  731. +/* #define MORECORE_CANNOT_TRIM */
  732. +
  733. +
  734. +/*
  735. + Define HAVE_MMAP as true to optionally make malloc() use mmap() to
  736. + allocate very large blocks. These will be returned to the
  737. + operating system immediately after a free(). Also, if mmap
  738. + is available, it is used as a backup strategy in cases where
  739. + MORECORE fails to provide space from system.
  740. +
  741. + This malloc is best tuned to work with mmap for large requests.
  742. + If you do not have mmap, operations involving very large chunks (1MB
  743. + or so) may be slower than you'd like.
  744. +*/
  745. +
  746. +#ifndef HAVE_MMAP
  747. +#define HAVE_MMAP 1
  748. +#endif
  749. +
  750. +#if HAVE_MMAP
  751. +/*
  752. + Standard unix mmap using /dev/zero clears memory so calloc doesn't
  753. + need to.
  754. +*/
  755. +
  756. +#ifndef MMAP_CLEARS
  757. +#define MMAP_CLEARS 1
  758. +#endif
  759. +
  760. +#else /* no mmap */
  761. +#ifndef MMAP_CLEARS
  762. +#define MMAP_CLEARS 0
  763. +#endif
  764. +#endif
  765. +
  766. +
  767. +/*
  768. + MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  769. + sbrk fails, and mmap is used as a backup (which is done only if
  770. + HAVE_MMAP). The value must be a multiple of page size. This
  771. + backup strategy generally applies only when systems have "holes" in
  772. + address space, so sbrk cannot perform contiguous expansion, but
  773. + there is still space available on system. On systems for which
  774. + this is known to be useful (i.e. most linux kernels), this occurs
  775. + only when programs allocate huge amounts of memory. Between this,
  776. + and the fact that mmap regions tend to be limited, the size should
  777. + be large, to avoid too many mmap calls and thus avoid running out
  778. + of kernel resources.
  779. +*/
  780. +
  781. +#ifndef MMAP_AS_MORECORE_SIZE
  782. +#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
  783. +#endif
  784. +
  785. +/*
  786. + Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  787. + large blocks. This is currently only possible on Linux with
  788. + kernel versions newer than 1.3.77.
  789. +*/
  790. +
  791. +#ifndef HAVE_MREMAP
  792. +#if defined(linux) || defined(__linux__) || defined(__linux)
  793. +#define HAVE_MREMAP 1
  794. +#else
  795. +#define HAVE_MREMAP 0
  796. +#endif
  797. +
  798. +#endif /* HAVE_MMAP */
  799. +
  800. +
  801. +/*
  802. + The system page size. To the extent possible, this malloc manages
  803. + memory from the system in page-size units. Note that this value is
  804. + cached during initialization into a field of malloc_state. So even
  805. + if malloc_getpagesize is a function, it is only called once.
  806. +
  807. + The following mechanics for getpagesize were adapted from bsd/gnu
  808. + getpagesize.h. If none of the system-probes here apply, a value of
  809. + 4096 is used, which should be OK: If they don't apply, then using
  810. + the actual value probably doesn't impact performance.
  811. +*/
  812. +
  813. +
  814. +#ifndef malloc_getpagesize
  815. +
  816. +#ifndef LACKS_UNISTD_H
  817. +# include <unistd.h>
  818. +#endif
  819. +
  820. +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
  821. +# ifndef _SC_PAGE_SIZE
  822. +# define _SC_PAGE_SIZE _SC_PAGESIZE
  823. +# endif
  824. +# endif
  825. +
  826. +# ifdef _SC_PAGE_SIZE
  827. +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  828. +# else
  829. +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  830. + extern size_t getpagesize();
  831. +# define malloc_getpagesize getpagesize()
  832. +# else
  833. +# ifdef WIN32 /* use supplied emulation of getpagesize */
  834. +# define malloc_getpagesize getpagesize()
  835. +# else
  836. +# ifndef LACKS_SYS_PARAM_H
  837. +# include <sys/param.h>
  838. +# endif
  839. +# ifdef EXEC_PAGESIZE
  840. +# define malloc_getpagesize EXEC_PAGESIZE
  841. +# else
  842. +# ifdef NBPG
  843. +# ifndef CLSIZE
  844. +# define malloc_getpagesize NBPG
  845. +# else
  846. +# define malloc_getpagesize (NBPG * CLSIZE)
  847. +# endif
  848. +# else
  849. +# ifdef NBPC
  850. +# define malloc_getpagesize NBPC
  851. +# else
  852. +# ifdef PAGESIZE
  853. +# define malloc_getpagesize PAGESIZE
  854. +# else /* just guess */
  855. +# define malloc_getpagesize (4096)
  856. +# endif
  857. +# endif
  858. +# endif
  859. +# endif
  860. +# endif
  861. +# endif
  862. +# endif
  863. +#endif
  864. +
  865. +/*
  866. + This version of malloc supports the standard SVID/XPG mallinfo
  867. + routine that returns a struct containing usage properties and
  868. + statistics. It should work on any SVID/XPG compliant system that has
  869. + a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
  870. + install such a thing yourself, cut out the preliminary declarations
  871. + as described above and below and save them in a malloc.h file. But
  872. + there's no compelling reason to bother to do this.)
  873. +
  874. + The main declaration needed is the mallinfo struct that is returned
   875. + (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
   876. + bunch of fields that are not even meaningful in this version of
   877. + malloc. These fields are instead filled by mallinfo() with
  878. + other numbers that might be of interest.
  879. +
  880. + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  881. + /usr/include/malloc.h file that includes a declaration of struct
  882. + mallinfo. If so, it is included; else an SVID2/XPG2 compliant
  883. + version is declared below. These must be precisely the same for
  884. + mallinfo() to work. The original SVID version of this struct,
  885. + defined on most systems with mallinfo, declares all fields as
  886. + ints. But some others define as unsigned long. If your system
  887. + defines the fields using a type of different width than listed here,
  888. + you must #include your system version and #define
  889. + HAVE_USR_INCLUDE_MALLOC_H.
  890. +*/
  891. +
  892. +/* #define HAVE_USR_INCLUDE_MALLOC_H */
  893. +
  894. +/*#ifdef HAVE_USR_INCLUDE_MALLOC_H*/
  895. +#if 0
  896. +#include "/usr/include/malloc.h"
  897. +#else
  898. +
  899. +/* SVID2/XPG mallinfo structure */
  900. +
  901. +struct mallinfo {
  902. + int arena; /* non-mmapped space allocated from system */
  903. + int ordblks; /* number of free chunks */
  904. + int smblks; /* number of fastbin blocks */
  905. + int hblks; /* number of mmapped regions */
  906. + int hblkhd; /* space in mmapped regions */
  907. + int usmblks; /* maximum total allocated space */
  908. + int fsmblks; /* space available in freed fastbin blocks */
  909. + int uordblks; /* total allocated space */
  910. + int fordblks; /* total free space */
  911. + int keepcost; /* top-most, releasable (via malloc_trim) space */
  912. +};
  913. +
  914. +/*
  915. + SVID/XPG defines four standard parameter numbers for mallopt,
  916. + normally defined in malloc.h. Only one of these (M_MXFAST) is used
  917. + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  918. + so setting them has no effect. But this malloc also supports other
  919. + options in mallopt described below.
  920. +*/
  921. +#endif
  922. +
  923. +
  924. +/* ---------- description of public routines ------------ */
  925. +
  926. +/*
  927. + malloc(size_t n)
  928. + Returns a pointer to a newly allocated chunk of at least n bytes, or null
  929. + if no space is available. Additionally, on failure, errno is
  930. + set to ENOMEM on ANSI C systems.
  931. +
   932. + If n is zero, malloc returns a minimum-sized chunk. (The minimum
  933. + size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  934. + systems.) On most systems, size_t is an unsigned type, so calls
  935. + with negative arguments are interpreted as requests for huge amounts
  936. + of space, which will often fail. The maximum supported value of n
  937. + differs across systems, but is in all cases less than the maximum
  938. + representable value of a size_t.
  939. +*/
  940. +#if __STD_C
  941. +Void_t* public_mALLOc(size_t);
  942. +#else
  943. +Void_t* public_mALLOc();
  944. +#endif
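+
+/*
+ For illustration, a minimal calling sketch of the behavior described
+ above (hypothetical helper; assumes <stdio.h> and <stdlib.h>):
+
+   void* xmalloc(size_t n) {
+     void* p = malloc(n);     // n == 0 still yields a minimum-sized chunk
+     if (p == 0 && n != 0) {  // on failure errno is ENOMEM on ANSI systems
+       perror("malloc");
+       exit(1);
+     }
+     return p;
+   }
+*/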
  945. +
  946. +/*
  947. + free(Void_t* p)
  948. + Releases the chunk of memory pointed to by p, that had been previously
  949. + allocated using malloc or a related routine such as realloc.
  950. + It has no effect if p is null. It can have arbitrary (i.e., bad!)
  951. + effects if p has already been freed.
  952. +
   953. + Unless disabled (using mallopt), freeing very large spaces will,
  954. + when possible, automatically trigger operations that give
  955. + back unused memory to the system, thus reducing program footprint.
  956. +*/
  957. +#if __STD_C
  958. +void public_fREe(Void_t*);
  959. +#else
  960. +void public_fREe();
  961. +#endif
  962. +
  963. +/*
  964. + calloc(size_t n_elements, size_t element_size);
  965. + Returns a pointer to n_elements * element_size bytes, with all locations
  966. + set to zero.
  967. +*/
  968. +#if __STD_C
  969. +Void_t* public_cALLOc(size_t, size_t);
  970. +#else
  971. +Void_t* public_cALLOc();
  972. +#endif
  973. +
  974. +/*
  975. + realloc(Void_t* p, size_t n)
  976. + Returns a pointer to a chunk of size n that contains the same data
  977. + as does chunk p up to the minimum of (n, p's size) bytes, or null
  978. + if no space is available.
  979. +
  980. + The returned pointer may or may not be the same as p. The algorithm
  981. + prefers extending p when possible, otherwise it employs the
  982. + equivalent of a malloc-copy-free sequence.
  983. +
  984. + If p is null, realloc is equivalent to malloc.
  985. +
  986. + If space is not available, realloc returns null, errno is set (if on
  987. + ANSI) and p is NOT freed.
  988. +
   989. + If n is for fewer bytes than already held by p, the newly unused
  990. + space is lopped off and freed if possible. Unless the #define
  991. + REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  992. + zero (re)allocates a minimum-sized chunk.
  993. +
  994. + Large chunks that were internally obtained via mmap will always
  995. + be reallocated using malloc-copy-free sequences unless
  996. + the system supports MREMAP (currently only linux).
  997. +
  998. + The old unix realloc convention of allowing the last-free'd chunk
  999. + to be used as an argument to realloc is not supported.
  1000. +*/
  1001. +#if __STD_C
  1002. +Void_t* public_rEALLOc(Void_t*, size_t);
  1003. +#else
  1004. +Void_t* public_rEALLOc();
  1005. +#endif
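+
+/*
+ For illustration, a sketch of the "p is NOT freed on failure" rule
+ above (hypothetical helper; assumes <stdlib.h>):
+
+   int grow_buffer(char** buf, size_t newsize) {
+     char* p = (char*) realloc(*buf, newsize);
+     if (p == 0)
+       return -1;   // *buf is untouched and must still be freed later
+     *buf = p;      // overwrite the old pointer only on success
+     return 0;
+   }
+*/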
  1006. +
  1007. +/*
  1008. + memalign(size_t alignment, size_t n);
  1009. + Returns a pointer to a newly allocated chunk of n bytes, aligned
  1010. + in accord with the alignment argument.
  1011. +
  1012. + The alignment argument should be a power of two. If the argument is
  1013. + not a power of two, the nearest greater power is used.
  1014. + 8-byte alignment is guaranteed by normal malloc calls, so don't
  1015. + bother calling memalign with an argument of 8 or less.
  1016. +
  1017. + Overreliance on memalign is a sure way to fragment space.
  1018. +*/
  1019. +#if __STD_C
  1020. +Void_t* public_mEMALIGn(size_t, size_t);
  1021. +#else
  1022. +Void_t* public_mEMALIGn();
  1023. +#endif
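+
+/*
+ For illustration, a minimal memalign sketch: a 64-byte aligned block
+ (64 is a power of two, as required above; assumes <stdlib.h>):
+
+   void* p = memalign(64, 1024);
+   if (p != 0) {
+     // ((unsigned long) p & 63) == 0 here
+     free(p);
+   }
+*/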
  1024. +
  1025. +/*
  1026. + valloc(size_t n);
  1027. + Equivalent to memalign(pagesize, n), where pagesize is the page
  1028. + size of the system. If the pagesize is unknown, 4096 is used.
  1029. +*/
  1030. +#if __STD_C
  1031. +Void_t* public_vALLOc(size_t);
  1032. +#else
  1033. +Void_t* public_vALLOc();
  1034. +#endif
  1035. +
  1036. +
  1037. +
  1038. +/*
  1039. + mallopt(int parameter_number, int parameter_value)
   1040. + Sets tunable parameters. The format is to provide a
  1041. + (parameter-number, parameter-value) pair. mallopt then sets the
  1042. + corresponding parameter to the argument value if it can (i.e., so
  1043. + long as the value is meaningful), and returns 1 if successful else
  1044. + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
  1045. + normally defined in malloc.h. Only one of these (M_MXFAST) is used
  1046. + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  1047. + so setting them has no effect. But this malloc also supports four
  1048. + other options in mallopt. See below for details. Briefly, supported
  1049. + parameters are as follows (listed defaults are for "typical"
  1050. + configurations).
  1051. +
   1052. + Symbol            param #  default    allowed param values
   1053. + M_MXFAST          1        64         0-80  (0 disables fastbins)
   1054. + M_TRIM_THRESHOLD  -1       128*1024   any   (-1U disables trimming)
   1055. + M_TOP_PAD         -2       0          any
   1056. + M_MMAP_THRESHOLD  -3       128*1024   any   (or 0 if no MMAP support)
   1057. + M_MMAP_MAX        -4       65536      any   (0 disables use of mmap)
  1058. +*/
  1059. +#if __STD_C
  1060. +int public_mALLOPt(int, int);
  1061. +#else
  1062. +int public_mALLOPt();
  1063. +#endif
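+
+/*
+ For illustration, a tuning sketch using the parameters tabulated above
+ (the values are examples, not recommendations):
+
+   mallopt(M_MXFAST, 0);                  // disable fastbins entirely
+   mallopt(M_TRIM_THRESHOLD, 256*1024);   // trim less aggressively
+   mallopt(M_MMAP_THRESHOLD, 256*1024);   // mmap only requests >= 256K
+   mallopt(M_MMAP_MAX, 0);                // never service requests via mmap
+
+ Each call returns 1 if the value was accepted and 0 otherwise.
+*/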
  1064. +
  1065. +
  1066. +/*
  1067. + mallinfo()
  1068. + Returns (by copy) a struct containing various summary statistics:
  1069. +
  1070. + arena: current total non-mmapped bytes allocated from system
  1071. + ordblks: the number of free chunks
  1072. + smblks: the number of fastbin blocks (i.e., small chunks that
   1073. + have been freed but not yet reused or consolidated)
  1074. + hblks: current number of mmapped regions
  1075. + hblkhd: total bytes held in mmapped regions
  1076. + usmblks: the maximum total allocated space. This will be greater
  1077. + than current total if trimming has occurred.
  1078. + fsmblks: total bytes held in fastbin blocks
  1079. + uordblks: current total allocated space (normal or mmapped)
  1080. + fordblks: total free space
  1081. + keepcost: the maximum number of bytes that could ideally be released
  1082. + back to system via malloc_trim. ("ideally" means that
  1083. + it ignores page restrictions etc.)
  1084. +
  1085. + Because these fields are ints, but internal bookkeeping may
  1086. + be kept as longs, the reported values may wrap around zero and
  1087. + thus be inaccurate.
  1088. +*/
  1089. +#if __STD_C
  1090. +struct mallinfo public_mALLINFo(void);
  1091. +#else
  1092. +struct mallinfo public_mALLINFo();
  1093. +#endif
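+
+/*
+ For illustration, a sketch that reports a few of the fields described
+ above (assumes <stdio.h>):
+
+   struct mallinfo mi = mallinfo();
+   fprintf(stderr, "arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
+           mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
+*/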
  1094. +
  1095. +/*
  1096. + independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
  1097. +
  1098. + independent_calloc is similar to calloc, but instead of returning a
  1099. + single cleared space, it returns an array of pointers to n_elements
  1100. + independent elements that can hold contents of size elem_size, each
  1101. + of which starts out cleared, and can be independently freed,
  1102. + realloc'ed etc. The elements are guaranteed to be adjacently
  1103. + allocated (this is not guaranteed to occur with multiple callocs or
  1104. + mallocs), which may also improve cache locality in some
  1105. + applications.
  1106. +
  1107. + The "chunks" argument is optional (i.e., may be null, which is
  1108. + probably the most typical usage). If it is null, the returned array
  1109. + is itself dynamically allocated and should also be freed when it is
  1110. + no longer needed. Otherwise, the chunks array must be of at least
  1111. + n_elements in length. It is filled in with the pointers to the
  1112. + chunks.
  1113. +
  1114. + In either case, independent_calloc returns this pointer array, or
  1115. + null if the allocation failed. If n_elements is zero and "chunks"
  1116. + is null, it returns a chunk representing an array with zero elements
  1117. + (which should be freed if not wanted).
  1118. +
  1119. + Each element must be individually freed when it is no longer
  1120. + needed. If you'd like to instead be able to free all at once, you
  1121. + should instead use regular calloc and assign pointers into this
  1122. + space to represent elements. (In this case though, you cannot
  1123. + independently free elements.)
  1124. +
  1125. + independent_calloc simplifies and speeds up implementations of many
  1126. + kinds of pools. It may also be useful when constructing large data
  1127. + structures that initially have a fixed number of fixed-sized nodes,
  1128. + but the number is not known at compile time, and some of the nodes
  1129. + may later need to be freed. For example:
  1130. +
  1131. + struct Node { int item; struct Node* next; };
  1132. +
  1133. + struct Node* build_list() {
  1134. + struct Node** pool;
  1135. + int n = read_number_of_nodes_needed();
  1136. + if (n <= 0) return 0;
   1137. + pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
  1138. + if (pool == 0) die();
  1139. + // organize into a linked list...
  1140. + struct Node* first = pool[0];
   1141. + for (int i = 0; i < n-1; ++i)
  1142. + pool[i]->next = pool[i+1];
  1143. + free(pool); // Can now free the array (or not, if it is needed later)
  1144. + return first;
  1145. + }
  1146. +*/
  1147. +#if __STD_C
  1148. +Void_t** public_iCALLOc(size_t, size_t, Void_t**);
  1149. +#else
  1150. +Void_t** public_iCALLOc();
  1151. +#endif
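+
+/*
+ For illustration, the non-null "chunks" form described above: the
+ caller supplies the pointer array, so only the elements themselves are
+ dynamically allocated (hypothetical fixed-size example):
+
+   struct Node* slots[16];
+   if (independent_calloc(16, sizeof(struct Node), (void**) slots) == 0)
+     die();
+   // slots[0] .. slots[15] are zeroed, adjacent, and individually freeable
+*/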
  1152. +
  1153. +/*
  1154. + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
  1155. +
  1156. + independent_comalloc allocates, all at once, a set of n_elements
  1157. + chunks with sizes indicated in the "sizes" array. It returns
  1158. + an array of pointers to these elements, each of which can be
  1159. + independently freed, realloc'ed etc. The elements are guaranteed to
  1160. + be adjacently allocated (this is not guaranteed to occur with
  1161. + multiple callocs or mallocs), which may also improve cache locality
  1162. + in some applications.
  1163. +
  1164. + The "chunks" argument is optional (i.e., may be null). If it is null
  1165. + the returned array is itself dynamically allocated and should also
  1166. + be freed when it is no longer needed. Otherwise, the chunks array
  1167. + must be of at least n_elements in length. It is filled in with the
  1168. + pointers to the chunks.
  1169. +
  1170. + In either case, independent_comalloc returns this pointer array, or
  1171. + null if the allocation failed. If n_elements is zero and chunks is
  1172. + null, it returns a chunk representing an array with zero elements
  1173. + (which should be freed if not wanted).
  1174. +
  1175. + Each element must be individually freed when it is no longer
  1176. + needed. If you'd like to instead be able to free all at once, you
  1177. + should instead use a single regular malloc, and assign pointers at
  1178. + particular offsets in the aggregate space. (In this case though, you
  1179. + cannot independently free elements.)
  1180. +
   1181. + independent_comalloc differs from independent_calloc in that each
  1182. + element may have a different size, and also that it does not
  1183. + automatically clear elements.
  1184. +
  1185. + independent_comalloc can be used to speed up allocation in cases
  1186. + where several structs or objects must always be allocated at the
  1187. + same time. For example:
  1188. +
   1189. + struct Head { ... };
   1190. + struct Foot { ... };
  1191. +
  1192. + void send_message(char* msg) {
  1193. + int msglen = strlen(msg);
  1194. + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
  1195. + void* chunks[3];
  1196. + if (independent_comalloc(3, sizes, chunks) == 0)
  1197. + die();
  1198. + struct Head* head = (struct Head*)(chunks[0]);
  1199. + char* body = (char*)(chunks[1]);
  1200. + struct Foot* foot = (struct Foot*)(chunks[2]);
  1201. + // ...
  1202. + }
  1203. +
  1204. + In general though, independent_comalloc is worth using only for
  1205. + larger values of n_elements. For small values, you probably won't
  1206. + detect enough difference from series of malloc calls to bother.
  1207. +
  1208. + Overuse of independent_comalloc can increase overall memory usage,
  1209. + since it cannot reuse existing noncontiguous small chunks that
  1210. + might be available for some of the elements.
  1211. +*/
  1212. +#if __STD_C
  1213. +Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
  1214. +#else
  1215. +Void_t** public_iCOMALLOc();
  1216. +#endif
  1217. +
  1218. +
  1219. +/*
  1220. + pvalloc(size_t n);
  1221. + Equivalent to valloc(minimum-page-that-holds(n)), that is,
  1222. + round up n to nearest pagesize.
  1223. + */
  1224. +#if __STD_C
  1225. +Void_t* public_pVALLOc(size_t);
  1226. +#else
  1227. +Void_t* public_pVALLOc();
  1228. +#endif
  1229. +
  1230. +/*
  1231. + cfree(Void_t* p);
  1232. + Equivalent to free(p).
  1233. +
  1234. + cfree is needed/defined on some systems that pair it with calloc,
  1235. + for odd historical reasons (such as: cfree is used in example
  1236. + code in the first edition of K&R).
  1237. +*/
  1238. +#if __STD_C
  1239. +void public_cFREe(Void_t*);
  1240. +#else
  1241. +void public_cFREe();
  1242. +#endif
  1243. +
  1244. +/*
  1245. + malloc_trim(size_t pad);
  1246. +
  1247. + If possible, gives memory back to the system (via negative
  1248. + arguments to sbrk) if there is unused memory at the `high' end of
  1249. + the malloc pool. You can call this after freeing large blocks of
  1250. + memory to potentially reduce the system-level memory requirements
  1251. + of a program. However, it cannot guarantee to reduce memory. Under
  1252. + some allocation patterns, some large free blocks of memory will be
  1253. + locked between two used chunks, so they cannot be given back to
  1254. + the system.
  1255. +
  1256. + The `pad' argument to malloc_trim represents the amount of free
  1257. + trailing space to leave untrimmed. If this argument is zero,
  1258. + only the minimum amount of memory to maintain internal data
  1259. + structures will be left (one page or less). Non-zero arguments
  1260. + can be supplied to maintain enough trailing space to service
  1261. + future expected allocations without having to re-obtain memory
  1262. + from the system.
  1263. +
  1264. + Malloc_trim returns 1 if it actually released any memory, else 0.
  1265. + On systems that do not support "negative sbrks", it will always
   1266. + return 0.
  1267. +*/
  1268. +#if __STD_C
  1269. +int public_mTRIm(size_t);
  1270. +#else
  1271. +int public_mTRIm();
  1272. +#endif
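+
+/*
+ For illustration, a typical calling pattern for the description above:
+ after releasing a large working buffer, ask for unused top-of-heap
+ memory back while keeping 64K of slack (an example value):
+
+   free(big_buffer);
+   if (malloc_trim(64 * 1024) != 0)
+     ;   // some memory was actually returned to the system
+*/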
  1273. +
  1274. +/*
  1275. + malloc_usable_size(Void_t* p);
  1276. +
  1277. + Returns the number of bytes you can actually use in
  1278. + an allocated chunk, which may be more than you requested (although
  1279. + often not) due to alignment and minimum size constraints.
  1280. + You can use this many bytes without worrying about
  1281. + overwriting other allocated objects. This is not a particularly great
  1282. + programming practice. malloc_usable_size can be more useful in
  1283. + debugging and assertions, for example:
  1284. +
  1285. + p = malloc(n);
  1286. + assert(malloc_usable_size(p) >= 256);
  1287. +
  1288. +*/
  1289. +#if __STD_C
  1290. +size_t public_mUSABLe(Void_t*);
  1291. +#else
  1292. +size_t public_mUSABLe();
  1293. +#endif
  1294. +
  1295. +/*
  1296. + malloc_stats();
  1297. + Prints on stderr the amount of space obtained from the system (both
  1298. + via sbrk and mmap), the maximum amount (which may be more than
  1299. + current if malloc_trim and/or munmap got called), and the current
  1300. + number of bytes allocated via malloc (or realloc, etc) but not yet
  1301. + freed. Note that this is the number of bytes allocated, not the
  1302. + number requested. It will be larger than the number requested
  1303. + because of alignment and bookkeeping overhead. Because it includes
  1304. + alignment wastage as being in use, this figure may be greater than
  1305. + zero even when no user-level chunks are allocated.
  1306. +
  1307. + The reported current and maximum system memory can be inaccurate if
  1308. + a program makes other calls to system memory allocation functions
  1309. + (normally sbrk) outside of malloc.
  1310. +
  1311. + malloc_stats prints only the most commonly interesting statistics.
  1312. + More information can be obtained by calling mallinfo.
  1313. +
  1314. +*/
  1315. +#if __STD_C
  1316. +void public_mSTATs();
  1317. +#else
  1318. +void public_mSTATs();
  1319. +#endif
  1320. +
  1321. +/* mallopt tuning options */
  1322. +
  1323. +/*
  1324. + M_MXFAST is the maximum request size used for "fastbins", special bins
  1325. + that hold returned chunks without consolidating their spaces. This
  1326. + enables future requests for chunks of the same size to be handled
  1327. + very quickly, but can increase fragmentation, and thus increase the
  1328. + overall memory footprint of a program.
  1329. +
  1330. + This malloc manages fastbins very conservatively yet still
  1331. + efficiently, so fragmentation is rarely a problem for values less
  1332. + than or equal to the default. The maximum supported value of MXFAST
  1333. + is 80. You wouldn't want it any higher than this anyway. Fastbins
  1334. + are designed especially for use with many small structs, objects or
  1335. + strings -- the default handles structs/objects/arrays with sizes up
   1336. + to eight 4-byte fields, or small strings representing words, tokens,
  1337. + etc. Using fastbins for larger objects normally worsens
  1338. + fragmentation without improving speed.
  1339. +
  1340. + M_MXFAST is set in REQUEST size units. It is internally used in
  1341. + chunksize units, which adds padding and alignment. You can reduce
  1342. + M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
  1343. + algorithm to be a closer approximation of fifo-best-fit in all cases,
  1344. + not just for larger requests, but will generally cause it to be
  1345. + slower.
  1346. +*/
  1347. +
  1348. +
  1349. +/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
  1350. +#ifndef M_MXFAST
  1351. +#define M_MXFAST 1
  1352. +#endif
  1353. +
  1354. +#ifndef DEFAULT_MXFAST
  1355. +#define DEFAULT_MXFAST 64
  1356. +#endif
  1357. +
  1358. +
  1359. +/*
  1360. + M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  1361. + to keep before releasing via malloc_trim in free().
  1362. +
  1363. + Automatic trimming is mainly useful in long-lived programs.
  1364. + Because trimming via sbrk can be slow on some systems, and can
  1365. + sometimes be wasteful (in cases where programs immediately
  1366. + afterward allocate more large chunks) the value should be high
  1367. + enough so that your overall system performance would improve by
  1368. + releasing this much memory.
  1369. +
  1370. + The trim threshold and the mmap control parameters (see below)
  1371. + can be traded off with one another. Trimming and mmapping are
  1372. + two different ways of releasing unused memory back to the
  1373. + system. Between these two, it is often possible to keep
  1374. + system-level demands of a long-lived program down to a bare
  1375. + minimum. For example, in one test suite of sessions measuring
  1376. + the XF86 X server on Linux, using a trim threshold of 128K and a
  1377. + mmap threshold of 192K led to near-minimal long term resource
  1378. + consumption.
  1379. +
  1380. + If you are using this malloc in a long-lived program, it should
  1381. + pay to experiment with these values. As a rough guide, you
  1382. + might set to a value close to the average size of a process
  1383. + (program) running on your system. Releasing this much memory
  1384. + would allow such a process to run in memory. Generally, it's
   1385. + worth it to tune for trimming rather than memory mapping when a
  1386. + program undergoes phases where several large chunks are
  1387. + allocated and released in ways that can reuse each other's
  1388. + storage, perhaps mixed with phases where there are no such
  1389. + chunks at all. And in well-behaved long-lived programs,
  1390. + controlling release of large blocks via trimming versus mapping
  1391. + is usually faster.
  1392. +
  1393. + However, in most programs, these parameters serve mainly as
  1394. + protection against the system-level effects of carrying around
  1395. + massive amounts of unneeded memory. Since frequent calls to
  1396. + sbrk, mmap, and munmap otherwise degrade performance, the default
  1397. + parameters are set to relatively high values that serve only as
  1398. + safeguards.
  1399. +
   1400. + The trim value must be greater than the page size to have any useful
   1401. + effect. To disable trimming completely, you can set it to
   1402. + (unsigned long)(-1).
  1403. +
  1404. + Trim settings interact with fastbin (MXFAST) settings: Unless
  1405. + TRIM_FASTBINS is defined, automatic trimming never takes place upon
  1406. + freeing a chunk with size less than or equal to MXFAST. Trimming is
  1407. + instead delayed until subsequent freeing of larger chunks. However,
  1408. + you can still force an attempted trim by calling malloc_trim.
  1409. +
  1410. + Also, trimming is not generally possible in cases where
  1411. + the main arena is obtained via mmap.
  1412. +
  1413. + Note that the trick some people use of mallocing a huge space and
  1414. + then freeing it at program startup, in an attempt to reserve system
  1415. + memory, doesn't have the intended effect under automatic trimming,
  1416. + since that memory will immediately be returned to the system.
  1417. +*/
  1418. +
  1419. +#define M_TRIM_THRESHOLD -1
  1420. +
  1421. +#ifndef DEFAULT_TRIM_THRESHOLD
  1422. +#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
  1423. +#endif
  1424. +
  1425. +/*
  1426. + M_TOP_PAD is the amount of extra `padding' space to allocate or
  1427. + retain whenever sbrk is called. It is used in two ways internally:
  1428. +
  1429. + * When sbrk is called to extend the top of the arena to satisfy
  1430. + a new malloc request, this much padding is added to the sbrk
  1431. + request.
  1432. +
  1433. + * When malloc_trim is called automatically from free(),
  1434. + it is used as the `pad' argument.
  1435. +
  1436. + In both cases, the actual amount of padding is rounded
  1437. + so that the end of the arena is always a system page boundary.
  1438. +
  1439. + The main reason for using padding is to avoid calling sbrk so
  1440. + often. Having even a small pad greatly reduces the likelihood
  1441. + that nearly every malloc request during program start-up (or
  1442. + after trimming) will invoke sbrk, which needlessly wastes
  1443. + time.
  1444. +
  1445. + Automatic rounding-up to page-size units is normally sufficient
  1446. + to avoid measurable overhead, so the default is 0. However, in
  1447. + systems where sbrk is relatively slow, it can pay to increase
  1448. + this value, at the expense of carrying around more memory than
  1449. + the program needs.
  1450. +*/
  1451. +
  1452. +#define M_TOP_PAD -2
  1453. +
  1454. +#ifndef DEFAULT_TOP_PAD
  1455. +#define DEFAULT_TOP_PAD (0)
  1456. +#endif
  1457. +
  1458. +/*
  1459. + M_MMAP_THRESHOLD is the request size threshold for using mmap()
  1460. + to service a request. Requests of at least this size that cannot
  1461. + be allocated using already-existing space will be serviced via mmap.
  1462. + (If enough normal freed space already exists it is used instead.)
  1463. +
  1464. + Using mmap segregates relatively large chunks of memory so that
  1465. + they can be individually obtained and released from the host
  1466. + system. A request serviced through mmap is never reused by any
  1467. + other request (at least not directly; the system may just so
  1468. + happen to remap successive requests to the same locations).
  1469. +
  1470. + Segregating space in this way has the benefits that:
  1471. +
  1472. + 1. Mmapped space can ALWAYS be individually released back
  1473. + to the system, which helps keep the system level memory
  1474. + demands of a long-lived program low.
  1475. + 2. Mapped memory can never become `locked' between
  1476. + other chunks, as can happen with normally allocated chunks, which
  1477. + means that even trimming via malloc_trim would not release them.
  1478. + 3. On some systems with "holes" in address spaces, mmap can obtain
  1479. + memory that sbrk cannot.
  1480. +
  1481. + However, it has the disadvantages that:
  1482. +
  1483. + 1. The space cannot be reclaimed, consolidated, and then
  1484. + used to service later requests, as happens with normal chunks.
  1485. + 2. It can lead to more wastage because of mmap page alignment
   1486. + requirements.
  1487. + 3. It causes malloc performance to be more dependent on host
  1488. + system memory management support routines which may vary in
  1489. + implementation quality and may impose arbitrary
  1490. + limitations. Generally, servicing a request via normal
  1491. + malloc steps is faster than going through a system's mmap.
  1492. +
  1493. + The advantages of mmap nearly always outweigh disadvantages for
  1494. + "large" chunks, but the value of "large" varies across systems. The
  1495. + default is an empirically derived value that works well in most
  1496. + systems.
  1497. +*/
  1498. +
  1499. +#define M_MMAP_THRESHOLD -3
  1500. +
  1501. +#ifndef DEFAULT_MMAP_THRESHOLD
  1502. +#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
  1503. +#endif
  1504. +
  1505. +/*
  1506. + M_MMAP_MAX is the maximum number of requests to simultaneously
   1507. + service using mmap. This parameter exists because
   1508. + some systems have a limited number of internal tables for
  1509. + use by mmap, and using more than a few of them may degrade
  1510. + performance.
  1511. +
  1512. + The default is set to a value that serves only as a safeguard.
  1513. + Setting to 0 disables use of mmap for servicing large requests. If
  1514. + HAVE_MMAP is not set, the default value is 0, and attempts to set it
  1515. + to non-zero values in mallopt will fail.
  1516. +*/
  1517. +
  1518. +#define M_MMAP_MAX -4
  1519. +
  1520. +#ifndef DEFAULT_MMAP_MAX
  1521. +#if HAVE_MMAP
  1522. +#define DEFAULT_MMAP_MAX (65536)
  1523. +#else
  1524. +#define DEFAULT_MMAP_MAX (0)
  1525. +#endif
  1526. +#endif
  1527. +
  1528. +#ifdef __cplusplus
  1529. +}; /* end of extern "C" */
  1530. +#endif
  1531. +
  1532. +/*
  1533. + ========================================================================
  1534. + To make a fully customizable malloc.h header file, cut everything
  1535. + above this line, put into file malloc.h, edit to suit, and #include it
  1536. + on the next line, as well as in programs that use this malloc.
  1537. + ========================================================================
  1538. +*/
  1539. +
  1540. +/* #include "malloc.h" */
  1541. +
  1542. +/* --------------------- public wrappers ---------------------- */
  1543. +
  1544. +#ifdef USE_PUBLIC_MALLOC_WRAPPERS
  1545. +
  1546. +/* Declare all routines as internal */
  1547. +#if __STD_C
  1548. +static Void_t* mALLOc(size_t);
  1549. +static void fREe(Void_t*);
  1550. +static Void_t* rEALLOc(Void_t*, size_t);
  1551. +static Void_t* mEMALIGn(size_t, size_t);
  1552. +static Void_t* vALLOc(size_t);
  1553. +static Void_t* pVALLOc(size_t);
  1554. +static Void_t* cALLOc(size_t, size_t);
  1555. +static Void_t** iCALLOc(size_t, size_t, Void_t**);
  1556. +static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
  1557. +static void cFREe(Void_t*);
  1558. +static int mTRIm(size_t);
  1559. +static size_t mUSABLe(Void_t*);
  1560. +static void mSTATs();
  1561. +static int mALLOPt(int, int);
  1562. +static struct mallinfo mALLINFo(void);
  1563. +#else
  1564. +static Void_t* mALLOc();
  1565. +static void fREe();
  1566. +static Void_t* rEALLOc();
  1567. +static Void_t* mEMALIGn();
  1568. +static Void_t* vALLOc();
  1569. +static Void_t* pVALLOc();
  1570. +static Void_t* cALLOc();
  1571. +static Void_t** iCALLOc();
  1572. +static Void_t** iCOMALLOc();
  1573. +static void cFREe();
  1574. +static int mTRIm();
  1575. +static size_t mUSABLe();
  1576. +static void mSTATs();
  1577. +static int mALLOPt();
  1578. +static struct mallinfo mALLINFo();
  1579. +#endif
  1580. +
  1581. +/*
  1582. + MALLOC_PREACTION and MALLOC_POSTACTION should be
  1583. + defined to return 0 on success, and nonzero on failure.
  1584. + The return value of MALLOC_POSTACTION is currently ignored
  1585. + in wrapper functions since there is no reasonable default
  1586. + action to take on failure.
  1587. +*/
  1588. +
  1589. +
  1590. +#ifdef USE_MALLOC_LOCK
  1591. +
  1592. +#ifdef WIN32
  1593. +
  1594. +static int mALLOC_MUTEx;
  1595. +#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
  1596. +#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
  1597. +
  1598. +#else
  1599. +
  1600. +#if 0
  1601. +#include <pthread.h>
  1602. +
  1603. +static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
  1604. +
  1605. +#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
  1606. +#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
  1607. +
  1608. +#else
  1609. +
  1610. +#ifdef KDE_MALLOC_X86
  1611. +#include "x86.h"
  1612. +#elif defined(KDE_MALLOC_AVR32)
  1613. +
  1614. +#include <sched.h>
  1615. +#include <time.h>
  1616. +
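+/*
+ Atomically exchange *ptr with newval using the AVR32 xchg instruction
+ and return the previous value of *ptr; a return value of 0 therefore
+ means the caller in lock() below has acquired the lock.
+*/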
  1617. +static __inline__ int q_atomic_swp(volatile unsigned int *ptr,
  1618. + unsigned int newval)
  1619. +{
  1620. + register int ret;
  1621. + asm volatile("xchg %0,%1,%2"
  1622. + : "=&r"(ret)
  1623. + : "r"(ptr), "r"(newval)
  1624. + : "memory", "cc");
  1625. + return ret;
  1626. +}
  1627. +
  1628. +typedef struct {
  1629. + volatile unsigned int lock;
  1630. + int pad0_;
  1631. +} mutex_t;
  1632. +
  1633. +#define MUTEX_INITIALIZER { 0, 0 }
  1634. +
  1635. +static __inline__ int lock(mutex_t *m) {
  1636. + int cnt = 0;
  1637. + struct timespec tm;
  1638. +
  1639. + for(;;) {
  1640. + if (q_atomic_swp(&m->lock, 1) == 0)
  1641. + return 0;
  1642. +#ifdef _POSIX_PRIORITY_SCHEDULING
  1643. + if(cnt < 50) {
  1644. + sched_yield();
  1645. + cnt++;
  1646. + } else
  1647. +#endif
  1648. + {
  1649. + tm.tv_sec = 0;
  1650. + tm.tv_nsec = 2000001;
  1651. + nanosleep(&tm, NULL);
  1652. + cnt = 0;
  1653. + }
  1654. + }
  1655. +}
  1656. +
  1657. +static __inline__ int unlock(mutex_t *m) {
  1658. + m->lock = 0;
  1659. + return 0;
  1660. +}
  1661. +
  1662. +#else
  1663. +#error Unknown spinlock implementation
  1664. +#endif
  1665. +
  1666. +static mutex_t spinlock = MUTEX_INITIALIZER;
  1667. +
  1668. +#define MALLOC_PREACTION lock( &spinlock )
  1669. +#define MALLOC_POSTACTION unlock( &spinlock )
  1670. +
  1671. +#endif
  1672. +
   1673. +#endif /* WIN32 */
  1674. +
  1675. +#else
  1676. +
  1677. +/* Substitute anything you like for these */
  1678. +
  1679. +#define MALLOC_PREACTION (0)
  1680. +#define MALLOC_POSTACTION (0)
  1681. +
   1682. +#endif /* USE_MALLOC_LOCK */
  1683. +
  1684. +#if 0
  1685. +Void_t* public_mALLOc(size_t bytes) {
  1686. + Void_t* m;
  1687. + if (MALLOC_PREACTION != 0) {
  1688. + return 0;
  1689. + }
  1690. + m = mALLOc(bytes);
  1691. + if (MALLOC_POSTACTION != 0) {
  1692. + }
  1693. + return m;
  1694. +}
  1695. +
  1696. +void public_fREe(Void_t* m) {
  1697. + if (MALLOC_PREACTION != 0) {
  1698. + return;
  1699. + }
  1700. + fREe(m);
  1701. + if (MALLOC_POSTACTION != 0) {
  1702. + }
  1703. +}
  1704. +
  1705. +Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
  1706. + if (MALLOC_PREACTION != 0) {
  1707. + return 0;
  1708. + }
  1709. + m = rEALLOc(m, bytes);
  1710. + if (MALLOC_POSTACTION != 0) {
  1711. + }
  1712. + return m;
  1713. +}
  1714. +
  1715. +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
  1716. + Void_t* m;
  1717. + if (MALLOC_PREACTION != 0) {
  1718. + return 0;
  1719. + }
  1720. + m = mEMALIGn(alignment, bytes);
  1721. + if (MALLOC_POSTACTION != 0) {
  1722. + }
  1723. + return m;
  1724. +}
  1725. +
  1726. +Void_t* public_vALLOc(size_t bytes) {
  1727. + Void_t* m;
  1728. + if (MALLOC_PREACTION != 0) {
  1729. + return 0;
  1730. + }
  1731. + m = vALLOc(bytes);
  1732. + if (MALLOC_POSTACTION != 0) {
  1733. + }
  1734. + return m;
  1735. +}
  1736. +
  1737. +Void_t* public_pVALLOc(size_t bytes) {
  1738. + Void_t* m;
  1739. + if (MALLOC_PREACTION != 0) {
  1740. + return 0;
  1741. + }
  1742. + m = pVALLOc(bytes);
  1743. + if (MALLOC_POSTACTION != 0) {
  1744. + }
  1745. + return m;
  1746. +}
  1747. +
  1748. +Void_t* public_cALLOc(size_t n, size_t elem_size) {
  1749. + Void_t* m;
  1750. + if (MALLOC_PREACTION != 0) {
  1751. + return 0;
  1752. + }
  1753. + m = cALLOc(n, elem_size);
  1754. + if (MALLOC_POSTACTION != 0) {
  1755. + }
  1756. + return m;
  1757. +}
  1758. +
  1759. +
  1760. +Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
  1761. + Void_t** m;
  1762. + if (MALLOC_PREACTION != 0) {
  1763. + return 0;
  1764. + }
  1765. + m = iCALLOc(n, elem_size, chunks);
  1766. + if (MALLOC_POSTACTION != 0) {
  1767. + }
  1768. + return m;
  1769. +}
  1770. +
  1771. +Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
  1772. + Void_t** m;
  1773. + if (MALLOC_PREACTION != 0) {
  1774. + return 0;
  1775. + }
  1776. + m = iCOMALLOc(n, sizes, chunks);
  1777. + if (MALLOC_POSTACTION != 0) {
  1778. + }
  1779. + return m;
  1780. +}
  1781. +
  1782. +void public_cFREe(Void_t* m) {
  1783. + if (MALLOC_PREACTION != 0) {
  1784. + return;
  1785. + }
  1786. + cFREe(m);
  1787. + if (MALLOC_POSTACTION != 0) {
  1788. + }
  1789. +}
  1790. +
  1791. +int public_mTRIm(size_t s) {
  1792. + int result;
  1793. + if (MALLOC_PREACTION != 0) {
  1794. + return 0;
  1795. + }
  1796. + result = mTRIm(s);
  1797. + if (MALLOC_POSTACTION != 0) {
  1798. + }
  1799. + return result;
  1800. +}
  1801. +
  1802. +size_t public_mUSABLe(Void_t* m) {
  1803. + size_t result;
  1804. + if (MALLOC_PREACTION != 0) {
  1805. + return 0;
  1806. + }
  1807. + result = mUSABLe(m);
  1808. + if (MALLOC_POSTACTION != 0) {
  1809. + }
  1810. + return result;
  1811. +}
  1812. +
  1813. +void public_mSTATs() {
  1814. + if (MALLOC_PREACTION != 0) {
  1815. + return;
  1816. + }
  1817. + mSTATs();
  1818. + if (MALLOC_POSTACTION != 0) {
  1819. + }
  1820. +}
  1821. +
  1822. +struct mallinfo public_mALLINFo() {
  1823. + struct mallinfo m;
  1824. + if (MALLOC_PREACTION != 0) {
  1825. + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  1826. + return nm;
  1827. + }
  1828. + m = mALLINFo();
  1829. + if (MALLOC_POSTACTION != 0) {
  1830. + }
  1831. + return m;
  1832. +}
  1833. +
  1834. +int public_mALLOPt(int p, int v) {
  1835. + int result;
  1836. + if (MALLOC_PREACTION != 0) {
  1837. + return 0;
  1838. + }
  1839. + result = mALLOPt(p, v);
  1840. + if (MALLOC_POSTACTION != 0) {
  1841. + }
  1842. + return result;
  1843. +}
  1844. +#endif
  1845. +
  1846. +#endif
  1847. +
  1848. +
  1849. +
  1850. +/* ------------- Optional versions of memcopy ---------------- */
  1851. +
  1852. +
  1853. +#if USE_MEMCPY
  1854. +
  1855. +/*
  1856. + Note: memcpy is ONLY invoked with non-overlapping regions,
  1857. + so the (usually slower) memmove is not needed.
  1858. +*/
  1859. +
  1860. +#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
  1861. +#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
  1862. +
  1863. +#else /* !USE_MEMCPY */
  1864. +
  1865. +/* Use Duff's device for good zeroing/copying performance. */
  1866. +
  1867. +#define MALLOC_ZERO(charp, nbytes) \
  1868. +do { \
  1869. + INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
  1870. + unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
  1871. + long mcn; \
  1872. + if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
  1873. + switch (mctmp) { \
  1874. + case 0: for(;;) { *mzp++ = 0; \
  1875. + case 7: *mzp++ = 0; \
  1876. + case 6: *mzp++ = 0; \
  1877. + case 5: *mzp++ = 0; \
  1878. + case 4: *mzp++ = 0; \
  1879. + case 3: *mzp++ = 0; \
  1880. + case 2: *mzp++ = 0; \
  1881. + case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
  1882. + } \
  1883. +} while(0)
  1884. +
  1885. +#define MALLOC_COPY(dest,src,nbytes) \
  1886. +do { \
  1887. + INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
  1888. + INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
  1889. + unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
  1890. + long mcn; \
  1891. + if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
  1892. + switch (mctmp) { \
  1893. + case 0: for(;;) { *mcdst++ = *mcsrc++; \
  1894. + case 7: *mcdst++ = *mcsrc++; \
  1895. + case 6: *mcdst++ = *mcsrc++; \
  1896. + case 5: *mcdst++ = *mcsrc++; \
  1897. + case 4: *mcdst++ = *mcsrc++; \
  1898. + case 3: *mcdst++ = *mcsrc++; \
  1899. + case 2: *mcdst++ = *mcsrc++; \
  1900. + case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
  1901. + } \
  1902. +} while(0)
  1903. +
  1904. +#endif
  1905. +
  1906. +/* ------------------ MMAP support ------------------ */
  1907. +
  1908. +
  1909. +#if HAVE_MMAP
  1910. +
  1911. +#include <fcntl.h>
  1912. +#ifndef LACKS_SYS_MMAN_H
  1913. +#include <sys/mman.h>
  1914. +#endif
  1915. +
  1916. +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1917. +#define MAP_ANONYMOUS MAP_ANON
  1918. +#endif
  1919. +
  1920. +/*
  1921. + Nearly all versions of mmap support MAP_ANONYMOUS,
  1922. + so the following is unlikely to be needed, but is
  1923. + supplied just in case.
  1924. +*/
  1925. +
  1926. +#ifndef MAP_ANONYMOUS
  1927. +
  1928. +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1929. +
  1930. +#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
  1931. + (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1932. + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
  1933. + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
  1934. +
  1935. +#else
  1936. +
  1937. +#define MMAP(addr, size, prot, flags) \
  1938. + (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
  1939. +
  1940. +#endif
  1941. +
  1942. +
  1943. +#endif /* HAVE_MMAP */
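+
+/*
+ For illustration, the calling pattern the MMAP() macro above is meant
+ for: obtain `size' anonymous, zero-filled, read/write pages and check
+ for failure (a sketch; `size' should be a multiple of the page size):
+
+   char* mm = (char*) MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
+   if (mm != (char*) MAP_FAILED) {
+     // use mm[0 .. size-1]; release later with munmap(mm, size)
+   }
+*/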
  1944. +
  1945. +
  1946. +/*
  1947. + ----------------------- Chunk representations -----------------------
  1948. +*/
  1949. +
  1950. +
  1951. +/*
  1952. + This struct declaration is misleading (but accurate and necessary).
  1953. + It declares a "view" into memory allowing access to necessary
  1954. + fields at known offsets from a given base. See explanation below.
  1955. +*/
  1956. +
  1957. +struct malloc_chunk {
  1958. +
  1959. + INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  1960. + INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
  1961. +
  1962. + struct malloc_chunk* fd; /* double links -- used only if free. */
  1963. + struct malloc_chunk* bk;
  1964. +};
  1965. +
  1966. +
  1967. +typedef struct malloc_chunk* mchunkptr;
  1968. +
  1969. +/*
  1970. + malloc_chunk details:
  1971. +
  1972. + (The following includes lightly edited explanations by Colin Plumb.)
  1973. +
  1974. + Chunks of memory are maintained using a `boundary tag' method as
  1975. + described in e.g., Knuth or Standish. (See the paper by Paul
  1976. + Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
  1977. + survey of such techniques.) Sizes of free chunks are stored both
  1978. + in the front of each chunk and at the end. This makes
  1979. + consolidating fragmented chunks into bigger chunks very fast. The
  1980. + size fields also hold bits representing whether chunks are free or
  1981. + in use.
  1982. +
  1983. + An allocated chunk looks like this:
  1984. +
  1985. +
  1986. + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1987. + | Size of previous chunk, if allocated | |
  1988. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1989. + | Size of chunk, in bytes |P|
  1990. + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1991. + | User data starts here... .
  1992. + . .
  1993. + . (malloc_usable_space() bytes) .
  1994. + . |
  1995. +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1996. + | Size of chunk |
  1997. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1998. +
  1999. +
  2000. + Where "chunk" is the front of the chunk for the purpose of most of
  2001. + the malloc code, but "mem" is the pointer that is returned to the
  2002. + user. "Nextchunk" is the beginning of the next contiguous chunk.
  2003. +
  2004. + Chunks always begin on even word boundaries, so the mem portion
  2005. + (which is returned to the user) is also on an even word boundary, and
  2006. + thus at least double-word aligned.
  2007. +
  2008. + Free chunks are stored in circular doubly-linked lists, and look like this:
  2009. +
  2010. + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2011. + | Size of previous chunk |
  2012. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2013. + `head:' | Size of chunk, in bytes |P|
  2014. + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2015. + | Forward pointer to next chunk in list |
  2016. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2017. + | Back pointer to previous chunk in list |
  2018. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2019. + | Unused space (may be 0 bytes long) .
  2020. + . .
  2021. + . |
  2022. +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2023. + `foot:' | Size of chunk, in bytes |
  2024. + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  2025. +
  2026. + The P (PREV_INUSE) bit, stored in the unused low-order bit of the
  2027. + chunk size (which is always a multiple of two words), is an in-use
  2028. + bit for the *previous* chunk. If that bit is *clear*, then the
  2029. + word before the current chunk size contains the previous chunk
  2030. + size, and can be used to find the front of the previous chunk.
  2031. + The very first chunk allocated always has this bit set,
  2032. + preventing access to non-existent (or non-owned) memory. If
  2033. + prev_inuse is set for any given chunk, then you CANNOT determine
  2034. + the size of the previous chunk, and might even get a memory
  2035. + addressing fault when trying to do so.
  2036. +
  2037. + Note that the `foot' of the current chunk is actually represented
  2038. + as the prev_size of the NEXT chunk. This makes it easier to
  2039. + deal with alignments etc but can be very confusing when trying
  2040. + to extend or adapt this code.
  2041. +
  2042. + The two exceptions to all this are
  2043. +
  2044. + 1. The special chunk `top' doesn't bother using the
  2045. + trailing size field since there is no next contiguous chunk
  2046. + that would have to index off it. After initialization, `top'
  2047. + is forced to always exist. If it would become less than
  2048. + MINSIZE bytes long, it is replenished.
  2049. +
  2050. + 2. Chunks allocated via mmap, which have the second-lowest-order
  2051. + bit (IS_MMAPPED) set in their size fields. Because they are
  2052. + allocated one-by-one, each must contain its own trailing size field.
  2053. +
  2054. +*/
  2055. +
  2056. +/*
  2057. + ---------- Size and alignment checks and conversions ----------
  2058. +*/
  2059. +
  2060. +/* conversion from malloc headers to user pointers, and back */
  2061. +
  2062. +#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
  2063. +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
  2064. +
  2065. +/* The smallest possible chunk */
  2066. +#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
  2067. +
  2068. +/* The smallest size we can malloc is an aligned minimal chunk */
  2069. +
  2070. +#define MINSIZE \
  2071. + (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
  2072. +
  2073. +/* Check if m has acceptable alignment */
  2074. +
  2075. +#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
  2076. +
  2077. +
  2078. +/*
  2079. + Check if a request is so large that it would wrap around zero when
  2080. + padded and aligned. To simplify some other code, the bound is made
  2081. + low enough so that adding MINSIZE will also not wrap around zero.
  2082. +*/
  2083. +
  2084. +#define REQUEST_OUT_OF_RANGE(req) \
  2085. + ((unsigned long)(req) >= \
  2086. + (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
  2087. +
  2088. +/* pad request bytes into a usable size -- internal version */
  2089. +
  2090. +#define request2size(req) \
  2091. + (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
  2092. + MINSIZE : \
  2093. + ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
  2094. +
  2095. +/* Same, except also perform argument check */
  2096. +
  2097. +#define checked_request2size(req, sz) \
  2098. + if (REQUEST_OUT_OF_RANGE(req)) { \
  2099. + MALLOC_FAILURE_ACTION; \
  2100. + return 0; \
  2101. + } \
  2102. + (sz) = request2size(req);
  2103. +
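+/*
+ Worked example of the request/size arithmetic above, assuming a
+ typical 32-bit configuration (SIZE_SZ == 4, MALLOC_ALIGN_MASK == 7,
+ hence MINSIZE == 16):
+
+   request2size(1)  == 16   // below MINSIZE, rounded up
+   request2size(12) == 16   // 12 + 4 bytes overhead, already aligned
+   request2size(13) == 24   // 13 + 4 == 17, rounded up to a multiple of 8
+   request2size(24) == 32
+
+ So the chunk always holds at least the requested bytes and its size is
+ always a multiple of the alignment.
+*/
+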
  2104. +/*
  2105. + --------------- Physical chunk operations ---------------
  2106. +*/
  2107. +
  2108. +
  2109. +/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
  2110. +#define PREV_INUSE 0x1
  2111. +
  2112. +/* extract inuse bit of previous chunk */
  2113. +#define prev_inuse(p) ((p)->size & PREV_INUSE)
  2114. +
  2115. +
  2116. +/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
  2117. +#define IS_MMAPPED 0x2
  2118. +
  2119. +/* check for mmap()'ed chunk */
  2120. +#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
  2121. +
  2122. +/*
  2123. + Bits to mask off when extracting size
  2124. +
  2125. + Note: IS_MMAPPED is intentionally not masked off from size field in
  2126. + macros for which mmapped chunks should never be seen. This should
  2127. + cause helpful core dumps to occur if it is tried by accident by
  2128. + people extending or adapting this malloc.
  2129. +*/
  2130. +#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
  2131. +
  2132. +/* Get size, ignoring use bits */
  2133. +#define chunksize(p) ((p)->size & ~(SIZE_BITS))
  2134. +
  2135. +
  2136. +/* Ptr to next physical malloc_chunk. */
  2137. +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
  2138. +
  2139. +/* Ptr to previous physical malloc_chunk */
  2140. +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
  2141. +
  2142. +/* Treat space at ptr + offset as a chunk */
  2143. +#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
  2144. +
  2145. +/* extract p's inuse bit */
  2146. +#define inuse(p)\
  2147. +((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
  2148. +
  2149. +/* set/clear chunk as being inuse without otherwise disturbing */
  2150. +#define set_inuse(p)\
  2151. +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
  2152. +
  2153. +#define clear_inuse(p)\
  2154. +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
  2155. +
  2156. +
  2157. +/* check/set/clear inuse bits in known places */
  2158. +#define inuse_bit_at_offset(p, s)\
  2159. + (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
  2160. +
  2161. +#define set_inuse_bit_at_offset(p, s)\
  2162. + (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
  2163. +
  2164. +#define clear_inuse_bit_at_offset(p, s)\
  2165. + (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
  2166. +
  2167. +
  2168. +/* Set size at head, without disturbing its use bit */
  2169. +#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
  2170. +
  2171. +/* Set size/use field */
  2172. +#define set_head(p, s) ((p)->size = (s))
  2173. +
  2174. +/* Set size at footer (only when chunk is not in use) */
  2175. +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
  2176. +
  2177. +
  2178. +/*
  2179. + -------------------- Internal data structures --------------------
  2180. +
  2181. + All internal state is held in an instance of malloc_state defined
  2182. + below. There are no other static variables, except in two optional
  2183. + cases:
  2184. + * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
  2185. + * If HAVE_MMAP is true, but mmap doesn't support
  2186. + MAP_ANONYMOUS, a dummy file descriptor for mmap.
  2187. +
  2188. + Beware of lots of tricks that minimize the total bookkeeping space
  2189. + requirements. The result is a little over 1K bytes (for 4byte
  2190. + pointers and size_t.)
  2191. +*/
  2192. +
  2193. +/*
  2194. + Bins
  2195. +
  2196. + An array of bin headers for free chunks. Each bin is doubly
  2197. + linked. The bins are approximately proportionally (log) spaced.
  2198. + There are a lot of these bins (128). This may look excessive, but
  2199. + works very well in practice. Most bins hold sizes that are
  2200. + unusual as malloc request sizes, but are more usual for fragments
  2201. + and consolidated sets of chunks, which is what these bins hold, so
  2202. + they can be found quickly. All procedures maintain the invariant
  2203. + that no consolidated chunk physically borders another one, so each
  2204. + chunk in a list is known to be preceded and followed by either
  2205. + inuse chunks or the ends of memory.
  2206. +
  2207. + Chunks in bins are kept in size order, with ties going to the
  2208. + approximately least recently used chunk. Ordering isn't needed
  2209. + for the small bins, which all contain the same-sized chunks, but
  2210. + facilitates best-fit allocation for larger chunks. These lists
  2211. + are just sequential. Keeping them in order almost never requires
  2212. + enough traversal to warrant using fancier ordered data
  2213. + structures.
  2214. +
  2215. + Chunks of the same size are linked with the most
  2216. + recently freed at the front, and allocations are taken from the
  2217. + back. This results in LRU (FIFO) allocation order, which tends
  2218. + to give each chunk an equal opportunity to be consolidated with
  2219. + adjacent freed chunks, resulting in larger free chunks and less
  2220. + fragmentation.
  2221. +
  2222. + To simplify use in double-linked lists, each bin header acts
  2223. + as a malloc_chunk. This avoids special-casing for headers.
  2224. + But to conserve space and improve locality, we allocate
  2225. + only the fd/bk pointers of bins, and then use repositioning tricks
  2226. + to treat these as the fields of a malloc_chunk*.
  2227. +*/
  2228. +
  2229. +typedef struct malloc_chunk* mbinptr;
  2230. +
  2231. +/* addressing -- note that bin_at(0) does not exist */
  2232. +#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
  2233. +
  2234. +/* analog of ++bin */
  2235. +#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
  2236. +
  2237. +/* Reminders about list directionality within bins */
  2238. +#define first(b) ((b)->fd)
  2239. +#define last(b) ((b)->bk)
  2240. +
  2241. +/* Take a chunk off a bin list */
  2242. +#define unlink(P, BK, FD) { \
  2243. + FD = P->fd; \
  2244. + BK = P->bk; \
  2245. + FD->bk = BK; \
  2246. + BK->fd = FD; \
  2247. +}
  2248. +
  2249. +/*
  2250. + Indexing
  2251. +
  2252. + Bins for sizes < 512 bytes contain chunks of all the same size, spaced
  2253. + 8 bytes apart. Larger bins are approximately logarithmically spaced:
  2254. +
  2255. + 64 bins of size 8
  2256. + 32 bins of size 64
  2257. + 16 bins of size 512
  2258. + 8 bins of size 4096
  2259. + 4 bins of size 32768
  2260. + 2 bins of size 262144
  2261. + 1 bin of size what's left
  2262. +
  2263. + There is actually a little bit of slop in the numbers in bin_index
  2264. + for the sake of speed. This makes no difference elsewhere.
  2265. +
  2266. + The bins top out around 1MB because we expect to service large
  2267. + requests via mmap.
  2268. +*/
  2269. +
  2270. +#define NBINS 128
  2271. +#define NSMALLBINS 64
  2272. +#define SMALLBIN_WIDTH 8
  2273. +#define MIN_LARGE_SIZE 512
  2274. +
  2275. +#define in_smallbin_range(sz) \
  2276. + ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
  2277. +
  2278. +#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
  2279. +
  2280. +#define largebin_index(sz) \
  2281. +(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
  2282. + ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
  2283. + ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
  2284. + ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
  2285. + ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
  2286. + 126)
  2287. +
  2288. +#define bin_index(sz) \
  2289. + ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
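+
+/*
+ A few worked examples of the mapping (illustrative only):
+
+   bin_index(16)      ->   2    smallbin, 16 >> 3
+   bin_index(504)     ->  63    last smallbin
+   bin_index(512)     ->  64    first largebin, 56 + (512 >> 6)
+   bin_index(65536)   -> 121    119 + (65536 >> 15)
+   bin_index(2097152) -> 126    the catch-all largest bin
+*/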
  2290. +
  2291. +
  2292. +/*
  2293. + Unsorted chunks
  2294. +
  2295. + All remainders from chunk splits, as well as all returned chunks,
  2296. + are first placed in the "unsorted" bin. They are then placed
  2297. + in regular bins after malloc gives them ONE chance to be used before
  2298. + binning. So, basically, the unsorted_chunks list acts as a queue,
  2299. + with chunks being placed on it in free (and malloc_consolidate),
  2300. + and taken off (to be either used or placed in bins) in malloc.
  2301. +*/
  2302. +
  2303. +/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
  2304. +#define unsorted_chunks(M) (bin_at(M, 1))
  2305. +
  2306. +/*
  2307. + Top
  2308. +
  2309. + The top-most available chunk (i.e., the one bordering the end of
  2310. + available memory) is treated specially. It is never included in
  2311. + any bin, is used only if no other chunk is available, and is
  2312. + released back to the system if it is very large (see
  2313. + M_TRIM_THRESHOLD). Because top initially
  2314. + points to its own bin with initial zero size, thus forcing
  2315. + extension on the first malloc request, we avoid having any special
  2316. + code in malloc to check whether it even exists yet. But we still
  2317. + need to do so when getting memory from system, so we make
  2318. + initial_top treat the bin as a legal but unusable chunk during the
  2319. + interval between initialization and the first call to
  2320. + sYSMALLOc. (This is somewhat delicate, since it relies on
  2321. + the 2 preceding words to be zero during this interval as well.)
  2322. +*/
  2323. +
  2324. +/* Conveniently, the unsorted bin can be used as dummy top on first call */
  2325. +#define initial_top(M) (unsorted_chunks(M))
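+
+/*
+ Why this works (a sketch, assuming SIZE_SZ == sizeof(mchunkptr)):
+ bin_at(M, 1) points 2*SIZE_SZ bytes before M->bins[2], so the dummy
+ top's fd/bk are bin 1's fd/bk while its prev_size/size fields fall
+ inside bins[0]/bins[1]. Those two words are never written (bin 0 does
+ not exist and malloc_init_state only touches fd/bk), and statics start
+ out zero, so chunksize(av->top) reads 0 until sYSMALLOc installs a
+ real top. These are the "2 preceding words" referred to above.
+*/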
  2326. +
  2327. +/*
  2328. + Binmap
  2329. +
  2330. + To help compensate for the large number of bins, a one-level index
  2331. + structure is used for bin-by-bin searching. `binmap' is a
  2332. + bitvector recording whether bins are definitely empty so they can
  2333. + be skipped over during traversals. The bits are NOT always
  2334. + cleared as soon as bins are empty, but instead only
  2335. + when they are noticed to be empty during traversal in malloc.
  2336. +*/
  2337. +
  2338. +/* Conservatively use 32 bits per map word, even if on 64bit system */
  2339. +#define BINMAPSHIFT 5
  2340. +#define BITSPERMAP (1U << BINMAPSHIFT)
  2341. +#define BINMAPSIZE (NBINS / BITSPERMAP)
  2342. +
  2343. +#define idx2block(i) ((i) >> BINMAPSHIFT)
  2344. +#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
  2345. +
  2346. +#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
  2347. +#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
  2348. +#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
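+
+/*
+ For example (illustrative only): bin 70 lives in binmap word
+ idx2block(70) == 70 >> 5 == 2, at bit idx2bit(70) == 1U << (70 & 31)
+ == 1U << 6, so mark_bin(m, 70) sets bit 6 of m->binmap[2].
+*/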
  2349. +
  2350. +/*
  2351. + Fastbins
  2352. +
  2353. + An array of lists holding recently freed small chunks. Fastbins
  2354. + are not doubly linked. It is faster to single-link them, and
  2355. + since chunks are never removed from the middles of these lists,
  2356. + double linking is not necessary. Also, unlike regular bins, they
  2357. + are not even processed in FIFO order (they use faster LIFO) since
  2358. + ordering doesn't much matter in the transient contexts in which
  2359. + fastbins are normally used.
  2360. +
  2361. + Chunks in fastbins keep their inuse bit set, so they cannot
  2362. + be consolidated with other free chunks. malloc_consolidate
  2363. + releases all chunks in fastbins and consolidates them with
  2364. + other free chunks.
  2365. +*/
  2366. +
  2367. +typedef struct malloc_chunk* mfastbinptr;
  2368. +
  2369. +/* offset 2 to use otherwise unindexable first 2 bins */
  2370. +#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
  2371. +
  2372. +/* The maximum fastbin request size we support */
  2373. +#define MAX_FAST_SIZE 80
  2374. +
  2375. +#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
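+
+/*
+ A sketch of the resulting layout, assuming SIZE_SZ == 4 and 8-byte
+ alignment (so MINSIZE == 16): fastbin_index(16) == 0, fastbin_index(24)
+ == 1, and so on in 8-byte steps; request2size(MAX_FAST_SIZE) is then
+ 88, so NFASTBINS works out to 10. Other configurations shift these
+ numbers but not the scheme.
+*/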
  2376. +
  2377. +/*
  2378. + FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  2379. + that triggers automatic consolidation of possibly-surrounding
  2380. + fastbin chunks. This is a heuristic, so the exact value should not
  2381. + matter too much. It is defined at half the default trim threshold as a
  2382. + compromise heuristic to only attempt consolidation if it is likely
  2383. + to lead to trimming. However, it is not dynamically tunable, since
  2384. + consolidation reduces fragmentation surrounding large chunks even
  2385. + if trimming is not used.
  2386. +*/
  2387. +
  2388. +#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
  2389. +
  2390. +/*
  2391. + Since the lowest 2 bits in max_fast don't matter in size comparisons,
  2392. + they are used as flags.
  2393. +*/
  2394. +
  2395. +/*
  2396. + FASTCHUNKS_BIT held in max_fast indicates that there are probably
  2397. + some fastbin chunks. It is set true on entering a chunk into any
  2398. + fastbin, and cleared only in malloc_consolidate.
  2399. +
  2400. + The truth value is inverted so that have_fastchunks will be true
  2401. + upon startup (since statics are zero-filled), simplifying
  2402. + initialization checks.
  2403. +*/
  2404. +
  2405. +#define FASTCHUNKS_BIT (1U)
  2406. +
  2407. +#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
  2408. +#define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
  2409. +#define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
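+
+/*
+ Illustration of the inverted encoding: the static malloc_state starts
+ out all zeroes, so max_fast == 0, (max_fast & FASTCHUNKS_BIT) == 0 and
+ have_fastchunks() already reports true. The existing "consolidate if
+ there may be fastbins" checks therefore fire on the very first request
+ and route control into malloc_consolidate(), which notices max_fast ==
+ 0 and performs initialization; this helps keep explicit
+ "initialized yet?" tests off the hottest paths.
+*/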
  2410. +
  2411. +/*
  2412. + NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
  2413. + regions. Otherwise, contiguity is exploited in merging together,
  2414. + when possible, results from consecutive MORECORE calls.
  2415. +
  2416. + The initial value comes from MORECORE_CONTIGUOUS, but is
  2417. + changed dynamically if mmap is ever used as an sbrk substitute.
  2418. +*/
  2419. +
  2420. +#define NONCONTIGUOUS_BIT (2U)
  2421. +
  2422. +#define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
  2423. +#define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
  2424. +#define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
  2425. +#define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
  2426. +
  2427. +/*
  2428. + Set value of max_fast.
  2429. + Use impossibly small value if 0.
  2430. + Precondition: there are no existing fastbin chunks.
  2431. + Setting the value clears fastchunk bit but preserves noncontiguous bit.
  2432. +*/
  2433. +
  2434. +#define set_max_fast(M, s) \
  2435. + (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  2436. + FASTCHUNKS_BIT | \
  2437. + ((M)->max_fast & NONCONTIGUOUS_BIT)
  2438. +
  2439. +
  2440. +/*
  2441. + ----------- Internal state representation and initialization -----------
  2442. +*/
  2443. +
  2444. +struct malloc_state {
  2445. +
  2446. + /* The maximum chunk size to be eligible for fastbin */
  2447. + INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
  2448. +
  2449. + /* Fastbins */
  2450. + mfastbinptr fastbins[NFASTBINS];
  2451. +
  2452. + /* Base of the topmost chunk -- not otherwise kept in a bin */
  2453. + mchunkptr top;
  2454. +
  2455. + /* The remainder from the most recent split of a small request */
  2456. + mchunkptr last_remainder;
  2457. +
  2458. + /* Normal bins packed as described above */
  2459. + mchunkptr bins[NBINS * 2];
  2460. +
  2461. + /* Bitmap of bins */
  2462. + unsigned int binmap[BINMAPSIZE];
  2463. +
  2464. + /* Tunable parameters */
  2465. + unsigned long trim_threshold;
  2466. + INTERNAL_SIZE_T top_pad;
  2467. + INTERNAL_SIZE_T mmap_threshold;
  2468. +
  2469. + /* Memory map support */
  2470. + int n_mmaps;
  2471. + int n_mmaps_max;
  2472. + int max_n_mmaps;
  2473. +
  2474. + /* Cache malloc_getpagesize */
  2475. + unsigned int pagesize;
  2476. +
  2477. + /* Statistics */
  2478. + INTERNAL_SIZE_T mmapped_mem;
  2479. + INTERNAL_SIZE_T sbrked_mem;
  2480. + INTERNAL_SIZE_T max_sbrked_mem;
  2481. + INTERNAL_SIZE_T max_mmapped_mem;
  2482. + INTERNAL_SIZE_T max_total_mem;
  2483. +};
  2484. +
  2485. +typedef struct malloc_state *mstate;
  2486. +
  2487. +/*
  2488. + There is exactly one instance of this struct in this malloc.
  2489. + If you are adapting this malloc in a way that does NOT use a static
  2490. + malloc_state, you MUST explicitly zero-fill it before using. This
  2491. + malloc relies on the property that malloc_state is initialized to
  2492. + all zeroes (as is true of C statics).
  2493. +*/
  2494. +
  2495. +static struct malloc_state av_; /* never directly referenced */
  2496. +
  2497. +/*
  2498. + All uses of av_ are via get_malloc_state().
  2499. + At most one "call" to get_malloc_state is made per invocation of
  2500. + the public versions of malloc and free, but other routines
  2501. + that in turn invoke malloc and/or free may call it more than once.
  2502. + Also, it is called in check* routines if DEBUG is set.
  2503. +*/
  2504. +
  2505. +#define get_malloc_state() (&(av_))
  2506. +
  2507. +/*
  2508. + Initialize a malloc_state struct.
  2509. +
  2510. + This is called only from within malloc_consolidate, which needs
  2511. + to be called in the same contexts anyway. It is never called directly
  2512. + outside of malloc_consolidate because some optimizing compilers try
  2513. + to inline it at all call points, which turns out not to be an
  2514. + optimization at all. (Inlining it in malloc_consolidate is fine though.)
  2515. +*/
  2516. +
  2517. +#if __STD_C
  2518. +static void malloc_init_state(mstate av)
  2519. +#else
  2520. +static void malloc_init_state(av) mstate av;
  2521. +#endif
  2522. +{
  2523. + int i;
  2524. + mbinptr bin;
  2525. +
  2526. + /* Establish circular links for normal bins */
  2527. + for (i = 1; i < NBINS; ++i) {
  2528. + bin = bin_at(av,i);
  2529. + bin->fd = bin->bk = bin;
  2530. + }
  2531. +
  2532. + av->top_pad = DEFAULT_TOP_PAD;
  2533. + av->n_mmaps_max = DEFAULT_MMAP_MAX;
  2534. + av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2535. + av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2536. +
  2537. +#if !MORECORE_CONTIGUOUS
  2538. + set_noncontiguous(av);
  2539. +#endif
  2540. +
  2541. + set_max_fast(av, DEFAULT_MXFAST);
  2542. +
  2543. + av->top = initial_top(av);
  2544. + av->pagesize = malloc_getpagesize;
  2545. +}
  2546. +
  2547. +/*
  2548. + Other internal utilities operating on mstates
  2549. +*/
  2550. +
  2551. +#if __STD_C
  2552. +static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
  2553. +static int sYSTRIm(size_t, mstate);
  2554. +static void malloc_consolidate(mstate);
  2555. +static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
  2556. +#else
  2557. +static Void_t* sYSMALLOc();
  2558. +static int sYSTRIm();
  2559. +static void malloc_consolidate();
  2560. +static Void_t** iALLOc();
  2561. +#endif
  2562. +
  2563. +/*
  2564. + Debugging support
  2565. +
  2566. + These routines make a number of assertions about the states
  2567. + of data structures that should be true at all times. If any
  2568. + are not true, it's very likely that a user program has somehow
  2569. + trashed memory. (It's also possible that there is a coding error
  2570. + in malloc. In which case, please report it!)
  2571. +*/
  2572. +
  2573. +#ifndef DEBUG
  2574. +
  2575. +#define check_chunk(P)
  2576. +#define check_free_chunk(P)
  2577. +#define check_inuse_chunk(P)
  2578. +#define check_remalloced_chunk(P,N)
  2579. +#define check_malloced_chunk(P,N)
  2580. +#define check_malloc_state()
  2581. +
  2582. +#else
  2583. +#define check_chunk(P) do_check_chunk(P)
  2584. +#define check_free_chunk(P) do_check_free_chunk(P)
  2585. +#define check_inuse_chunk(P) do_check_inuse_chunk(P)
  2586. +#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
  2587. +#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
  2588. +#define check_malloc_state() do_check_malloc_state()
  2589. +
  2590. +/*
  2591. + Properties of all chunks
  2592. +*/
  2593. +
  2594. +INLINE
  2595. +#if __STD_C
  2596. +static void do_check_chunk(mchunkptr p)
  2597. +#else
  2598. +static void do_check_chunk(p) mchunkptr p;
  2599. +#endif
  2600. +{
  2601. + mstate av = get_malloc_state();
  2602. + unsigned long sz = chunksize(p);
  2603. + /* min and max possible addresses assuming contiguous allocation */
  2604. + char* max_address = (char*)(av->top) + chunksize(av->top);
  2605. + char* min_address = max_address - av->sbrked_mem;
  2606. +
  2607. + if (!chunk_is_mmapped(p)) {
  2608. +
  2609. + /* Has legal address ... */
  2610. + if (p != av->top) {
  2611. + if (contiguous(av)) {
  2612. + assert(((char*)p) >= min_address);
  2613. + assert(((char*)p + sz) <= ((char*)(av->top)));
  2614. + }
  2615. + }
  2616. + else {
  2617. + /* top size is always at least MINSIZE */
  2618. + assert((unsigned long)(sz) >= MINSIZE);
  2619. + /* top predecessor always marked inuse */
  2620. + assert(prev_inuse(p));
  2621. + }
  2622. +
  2623. + }
  2624. + else {
  2625. +#if HAVE_MMAP
  2626. + /* address is outside main heap */
  2627. + if (contiguous(av) && av->top != initial_top(av)) {
  2628. + assert(((char*)p) < min_address || ((char*)p) > max_address);
  2629. + }
  2630. + /* chunk is page-aligned */
  2631. + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
  2632. + /* mem is aligned */
  2633. + assert(aligned_OK(chunk2mem(p)));
  2634. +#else
  2635. + /* force an appropriate assert violation if debug set */
  2636. + assert(!chunk_is_mmapped(p));
  2637. +#endif
  2638. + }
  2639. +}
  2640. +
  2641. +/*
  2642. + Properties of free chunks
  2643. +*/
  2644. +
  2645. +INLINE
  2646. +#if __STD_C
  2647. +static void do_check_free_chunk(mchunkptr p)
  2648. +#else
  2649. +static void do_check_free_chunk(p) mchunkptr p;
  2650. +#endif
  2651. +{
  2652. + mstate av = get_malloc_state();
  2653. +
  2654. + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  2655. + mchunkptr next = chunk_at_offset(p, sz);
  2656. +
  2657. + do_check_chunk(p);
  2658. +
  2659. + /* Chunk must claim to be free ... */
  2660. + assert(!inuse(p));
  2661. + assert (!chunk_is_mmapped(p));
  2662. +
  2663. + /* Unless a special marker, must have OK fields */
  2664. + if ((unsigned long)(sz) >= MINSIZE)
  2665. + {
  2666. + assert((sz & MALLOC_ALIGN_MASK) == 0);
  2667. + assert(aligned_OK(chunk2mem(p)));
  2668. + /* ... matching footer field */
  2669. + assert(next->prev_size == sz);
  2670. + /* ... and is fully consolidated */
  2671. + assert(prev_inuse(p));
  2672. + assert (next == av->top || inuse(next));
  2673. +
  2674. + /* ... and has minimally sane links */
  2675. + assert(p->fd->bk == p);
  2676. + assert(p->bk->fd == p);
  2677. + }
  2678. + else /* markers are always of size SIZE_SZ */
  2679. + assert(sz == SIZE_SZ);
  2680. +}
  2681. +
  2682. +/*
  2683. + Properties of inuse chunks
  2684. +*/
  2685. +
  2686. +INLINE
  2687. +#if __STD_C
  2688. +static void do_check_inuse_chunk(mchunkptr p)
  2689. +#else
  2690. +static void do_check_inuse_chunk(p) mchunkptr p;
  2691. +#endif
  2692. +{
  2693. + mstate av = get_malloc_state();
  2694. + mchunkptr next;
  2695. + do_check_chunk(p);
  2696. +
  2697. + if (chunk_is_mmapped(p))
  2698. + return; /* mmapped chunks have no next/prev */
  2699. +
  2700. + /* Check whether it claims to be in use ... */
  2701. + assert(inuse(p));
  2702. +
  2703. + next = next_chunk(p);
  2704. +
  2705. + /* ... and is surrounded by OK chunks.
  2706. + Since more things can be checked with free chunks than inuse ones,
  2707. + if an inuse chunk borders them and debug is on, it's worth checking them.
  2708. + */
  2709. + if (!prev_inuse(p)) {
  2710. + /* Note that we cannot even look at prev unless it is not inuse */
  2711. + mchunkptr prv = prev_chunk(p);
  2712. + assert(next_chunk(prv) == p);
  2713. + do_check_free_chunk(prv);
  2714. + }
  2715. +
  2716. + if (next == av->top) {
  2717. + assert(prev_inuse(next));
  2718. + assert(chunksize(next) >= MINSIZE);
  2719. + }
  2720. + else if (!inuse(next))
  2721. + do_check_free_chunk(next);
  2722. +}
  2723. +
  2724. +/*
  2725. + Properties of chunks recycled from fastbins
  2726. +*/
  2727. +
  2728. +INLINE
  2729. +#if __STD_C
  2730. +static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
  2731. +#else
  2732. +static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
  2733. +#endif
  2734. +{
  2735. + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  2736. +
  2737. + do_check_inuse_chunk(p);
  2738. +
  2739. + /* Legal size ... */
  2740. + assert((sz & MALLOC_ALIGN_MASK) == 0);
  2741. + assert((unsigned long)(sz) >= MINSIZE);
  2742. + /* ... and alignment */
  2743. + assert(aligned_OK(chunk2mem(p)));
  2744. + /* chunk is less than MINSIZE more than request */
  2745. + assert((long)(sz) - (long)(s) >= 0);
  2746. + assert((long)(sz) - (long)(s + MINSIZE) < 0);
  2747. +}
  2748. +
  2749. +/*
  2750. + Properties of nonrecycled chunks at the point they are malloced
  2751. +*/
  2752. +
  2753. +INLINE
  2754. +#if __STD_C
  2755. +static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
  2756. +#else
  2757. +static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
  2758. +#endif
  2759. +{
  2760. + /* same as recycled case ... */
  2761. + do_check_remalloced_chunk(p, s);
  2762. +
  2763. + /*
  2764. + ... plus, must obey implementation invariant that prev_inuse is
  2765. + always true of any allocated chunk; i.e., that each allocated
  2766. + chunk borders either a previously allocated and still in-use
  2767. + chunk, or the base of its memory arena. This is ensured
  2768. + by making all allocations from the `lowest' part of any found
  2769. + chunk. This does not necessarily hold however for chunks
  2770. + recycled via fastbins.
  2771. + */
  2772. +
  2773. + assert(prev_inuse(p));
  2774. +}
  2775. +
  2776. +
  2777. +/*
  2778. + Properties of malloc_state.
  2779. +
  2780. + This may be useful for debugging malloc, as well as detecting user
  2781. + programming errors that somehow write into malloc_state.
  2782. +
  2783. + If you are extending or experimenting with this malloc, you can
  2784. + probably figure out how to hack this routine to print out or
  2785. + display chunk addresses, sizes, bins, and other instrumentation.
  2786. +*/
  2787. +
  2788. +static void do_check_malloc_state()
  2789. +{
  2790. + mstate av = get_malloc_state();
  2791. + int i;
  2792. + mchunkptr p;
  2793. + mchunkptr q;
  2794. + mbinptr b;
  2795. + unsigned int binbit;
  2796. + int empty;
  2797. + unsigned int idx;
  2798. + INTERNAL_SIZE_T size;
  2799. + unsigned long total = 0;
  2800. + int max_fast_bin;
  2801. +
  2802. + /* internal size_t must be no wider than pointer type */
  2803. + assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
  2804. +
  2805. + /* alignment is a power of 2 */
  2806. + assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
  2807. +
  2808. + /* cannot run remaining checks until fully initialized */
  2809. + if (av->top == 0 || av->top == initial_top(av))
  2810. + return;
  2811. +
  2812. + /* pagesize is a power of 2 */
  2813. + assert((av->pagesize & (av->pagesize-1)) == 0);
  2814. +
  2815. + /* properties of fastbins */
  2816. +
  2817. + /* max_fast is in allowed range */
  2818. + assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
  2819. +
  2820. + max_fast_bin = fastbin_index(av->max_fast);
  2821. +
  2822. + for (i = 0; i < NFASTBINS; ++i) {
  2823. + p = av->fastbins[i];
  2824. +
  2825. + /* all bins past max_fast are empty */
  2826. + if (i > max_fast_bin)
  2827. + assert(p == 0);
  2828. +
  2829. + while (p != 0) {
  2830. + /* each chunk claims to be inuse */
  2831. + do_check_inuse_chunk(p);
  2832. + total += chunksize(p);
  2833. + /* chunk belongs in this bin */
  2834. + assert(fastbin_index(chunksize(p)) == i);
  2835. + p = p->fd;
  2836. + }
  2837. + }
  2838. +
  2839. + if (total != 0)
  2840. + assert(have_fastchunks(av));
  2841. + else if (!have_fastchunks(av))
  2842. + assert(total == 0);
  2843. +
  2844. + /* check normal bins */
  2845. + for (i = 1; i < NBINS; ++i) {
  2846. + b = bin_at(av,i);
  2847. +
  2848. + /* binmap is accurate (except for bin 1 == unsorted_chunks) */
  2849. + if (i >= 2) {
  2850. + binbit = get_binmap(av,i);
  2851. + empty = last(b) == b;
  2852. + if (!binbit)
  2853. + assert(empty);
  2854. + else if (!empty)
  2855. + assert(binbit);
  2856. + }
  2857. +
  2858. + for (p = last(b); p != b; p = p->bk) {
  2859. + /* each chunk claims to be free */
  2860. + do_check_free_chunk(p);
  2861. + size = chunksize(p);
  2862. + total += size;
  2863. + if (i >= 2) {
  2864. + /* chunk belongs in bin */
  2865. + idx = bin_index(size);
  2866. + assert(idx == i);
  2867. + /* lists are sorted */
  2868. + assert(p->bk == b ||
  2869. + (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
  2870. + }
  2871. + /* chunk is followed by a legal chain of inuse chunks */
  2872. + for (q = next_chunk(p);
  2873. + (q != av->top && inuse(q) &&
  2874. + (unsigned long)(chunksize(q)) >= MINSIZE);
  2875. + q = next_chunk(q))
  2876. + do_check_inuse_chunk(q);
  2877. + }
  2878. + }
  2879. +
  2880. + /* top chunk is OK */
  2881. + check_chunk(av->top);
  2882. +
  2883. + /* sanity checks for statistics */
  2884. +
  2885. + assert(total <= (unsigned long)(av->max_total_mem));
  2886. + assert(av->n_mmaps >= 0);
  2887. + assert(av->n_mmaps <= av->n_mmaps_max);
  2888. + assert(av->n_mmaps <= av->max_n_mmaps);
  2889. +
  2890. + assert((unsigned long)(av->sbrked_mem) <=
  2891. + (unsigned long)(av->max_sbrked_mem));
  2892. +
  2893. + assert((unsigned long)(av->mmapped_mem) <=
  2894. + (unsigned long)(av->max_mmapped_mem));
  2895. +
  2896. + assert((unsigned long)(av->max_total_mem) >=
  2897. + (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
  2898. +}
  2899. +#endif
  2900. +
  2901. +
  2902. +/* ----------- Routines dealing with system allocation -------------- */
  2903. +
  2904. +/*
  2905. + sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
  2906. + to the system (via negative arguments to sbrk) if there is unused
  2907. + memory at the `high' end of the malloc pool. It is called
  2908. + automatically by free() when top space exceeds the trim
  2909. + threshold. It is also called by the public malloc_trim routine. It
  2910. + returns 1 if it actually released any memory, else 0.
  2911. +*/
  2912. +
  2913. +INLINE
  2914. +#if __STD_C
  2915. +static int sYSTRIm(size_t pad, mstate av)
  2916. +#else
  2917. +static int sYSTRIm(pad, av) size_t pad; mstate av;
  2918. +#endif
  2919. +{
  2920. + long top_size; /* Amount of top-most memory */
  2921. + long extra; /* Amount to release */
  2922. + long released; /* Amount actually released */
  2923. + char* current_brk; /* address returned by pre-check sbrk call */
  2924. + char* new_brk; /* address returned by post-check sbrk call */
  2925. + size_t pagesz;
  2926. +
  2927. + pagesz = av->pagesize;
  2928. + top_size = chunksize(av->top);
  2929. +
  2930. + /* Release in pagesize units, keeping at least one page */
  2931. + extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
  2932. +
  2933. + if (extra > 0) {
  2934. +
  2935. + /*
  2936. + Only proceed if end of memory is where we last set it.
  2937. + This avoids problems if there were foreign sbrk calls.
  2938. + */
  2939. + current_brk = (char*)(MORECORE(0));
  2940. + if (current_brk == (char*)(av->top) + top_size) {
  2941. +
  2942. + /*
  2943. + Attempt to release memory. We ignore MORECORE return value,
  2944. + and instead call again to find out where new end of memory is.
  2945. + This avoids problems if first call releases less than we asked,
  2946. + or if failure somehow altered the brk value. (We could still
  2947. + encounter problems if it altered brk in some very bad way,
  2948. + but the only thing we can do is adjust anyway, which will cause
  2949. + some downstream failure.)
  2950. + */
  2951. +
  2952. + MORECORE(-extra);
  2953. + new_brk = (char*)(MORECORE(0));
  2954. +
  2955. + if (new_brk != (char*)MORECORE_FAILURE) {
  2956. + released = (long)(current_brk - new_brk);
  2957. +
  2958. + if (released != 0) {
  2959. + /* Success. Adjust top. */
  2960. + av->sbrked_mem -= released;
  2961. + set_head(av->top, (top_size - released) | PREV_INUSE);
  2962. + check_malloc_state();
  2963. + return 1;
  2964. + }
  2965. + }
  2966. + }
  2967. + }
  2968. + return 0;
  2969. +}
  2970. +
  2971. +/*
  2972. + ------------------------- malloc_consolidate -------------------------
  2973. +
  2974. + malloc_consolidate is a specialized version of free() that tears
  2975. + down chunks held in fastbins. Free itself cannot be used for this
  2976. + purpose since, among other things, it might place chunks back onto
  2977. + fastbins. So, instead, we need to use a minor variant of the same
  2978. + code.
  2979. +
  2980. + Also, because this routine needs to be called the first time through
  2981. + malloc anyway, it turns out to be the perfect place to trigger
  2982. + initialization code.
  2983. +*/
  2984. +
  2985. +INLINE
  2986. +#if __STD_C
  2987. +static void malloc_consolidate(mstate av)
  2988. +#else
  2989. +static void malloc_consolidate(av) mstate av;
  2990. +#endif
  2991. +{
  2992. + mfastbinptr* fb; /* current fastbin being consolidated */
  2993. + mfastbinptr* maxfb; /* last fastbin (for loop control) */
  2994. + mchunkptr p; /* current chunk being consolidated */
  2995. + mchunkptr nextp; /* next chunk to consolidate */
  2996. + mchunkptr unsorted_bin; /* bin header */
  2997. + mchunkptr first_unsorted; /* chunk to link to */
  2998. +
  2999. + /* These have same use as in free() */
  3000. + mchunkptr nextchunk;
  3001. + INTERNAL_SIZE_T size;
  3002. + INTERNAL_SIZE_T nextsize;
  3003. + INTERNAL_SIZE_T prevsize;
  3004. + int nextinuse;
  3005. + mchunkptr bck;
  3006. + mchunkptr fwd;
  3007. +
  3008. + /*
  3009. + If max_fast is 0, we know that av hasn't
  3010. + yet been initialized, in which case do so below
  3011. + */
  3012. +
  3013. + if (av->max_fast != 0) {
  3014. + clear_fastchunks(av);
  3015. +
  3016. + unsorted_bin = unsorted_chunks(av);
  3017. +
  3018. + /*
  3019. + Remove each chunk from fast bin and consolidate it, placing it
  3020. + then in unsorted bin. Among other reasons for doing this,
  3021. + placing in unsorted bin avoids needing to calculate actual bins
  3022. + until malloc is sure that chunks aren't immediately going to be
  3023. + reused anyway.
  3024. + */
  3025. +
  3026. + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
  3027. + fb = &(av->fastbins[0]);
  3028. + do {
  3029. + if ( (p = *fb) != 0) {
  3030. + *fb = 0;
  3031. +
  3032. + do {
  3033. + check_inuse_chunk(p);
  3034. + nextp = p->fd;
  3035. +
  3036. + /* Slightly streamlined version of consolidation code in free() */
  3037. + size = p->size & ~PREV_INUSE;
  3038. + nextchunk = chunk_at_offset(p, size);
  3039. + nextsize = chunksize(nextchunk);
  3040. +
  3041. + if (!prev_inuse(p)) {
  3042. + prevsize = p->prev_size;
  3043. + size += prevsize;
  3044. + p = chunk_at_offset(p, -((long) prevsize));
  3045. + unlink(p, bck, fwd);
  3046. + }
  3047. +
  3048. + if (nextchunk != av->top) {
  3049. + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
  3050. + set_head(nextchunk, nextsize);
  3051. +
  3052. + if (!nextinuse) {
  3053. + size += nextsize;
  3054. + unlink(nextchunk, bck, fwd);
  3055. + }
  3056. +
  3057. + first_unsorted = unsorted_bin->fd;
  3058. + unsorted_bin->fd = p;
  3059. + first_unsorted->bk = p;
  3060. +
  3061. + set_head(p, size | PREV_INUSE);
  3062. + p->bk = unsorted_bin;
  3063. + p->fd = first_unsorted;
  3064. + set_foot(p, size);
  3065. + }
  3066. +
  3067. + else {
  3068. + size += nextsize;
  3069. + set_head(p, size | PREV_INUSE);
  3070. + av->top = p;
  3071. + }
  3072. +
  3073. + } while ( (p = nextp) != 0);
  3074. +
  3075. + }
  3076. + } while (fb++ != maxfb);
  3077. + }
  3078. + else {
  3079. + malloc_init_state(av);
  3080. + check_malloc_state();
  3081. + }
  3082. +}
  3083. +
  3084. +/*
  3085. + ------------------------------ free ------------------------------
  3086. +*/
  3087. +
  3088. +INLINE
  3089. +#if __STD_C
  3090. +void fREe(Void_t* mem)
  3091. +#else
  3092. +void fREe(mem) Void_t* mem;
  3093. +#endif
  3094. +{
  3095. + mstate av = get_malloc_state();
  3096. +
  3097. + mchunkptr p; /* chunk corresponding to mem */
  3098. + INTERNAL_SIZE_T size; /* its size */
  3099. + mfastbinptr* fb; /* associated fastbin */
  3100. + mchunkptr nextchunk; /* next contiguous chunk */
  3101. + INTERNAL_SIZE_T nextsize; /* its size */
  3102. + int nextinuse; /* true if nextchunk is used */
  3103. + INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
  3104. + mchunkptr bck; /* misc temp for linking */
  3105. + mchunkptr fwd; /* misc temp for linking */
  3106. +
  3107. +
  3108. + /* free(0) has no effect */
  3109. + if (mem != 0) {
  3110. + p = mem2chunk(mem);
  3111. + size = chunksize(p);
  3112. +
  3113. + check_inuse_chunk(p);
  3114. +
  3115. + /*
  3116. + If eligible, place chunk on a fastbin so it can be found
  3117. + and used quickly in malloc.
  3118. + */
  3119. +
  3120. + if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
  3121. +
  3122. +#if TRIM_FASTBINS
  3123. + /*
  3124. + If TRIM_FASTBINS set, don't place chunks
  3125. + bordering top into fastbins
  3126. + */
  3127. + && (chunk_at_offset(p, size) != av->top)
  3128. +#endif
  3129. + ) {
  3130. +
  3131. + set_fastchunks(av);
  3132. + fb = &(av->fastbins[fastbin_index(size)]);
  3133. + p->fd = *fb;
  3134. + *fb = p;
  3135. + }
  3136. +
  3137. + /*
  3138. + Consolidate other non-mmapped chunks as they arrive.
  3139. + */
  3140. +
  3141. + else if (!chunk_is_mmapped(p)) {
  3142. + nextchunk = chunk_at_offset(p, size);
  3143. + nextsize = chunksize(nextchunk);
  3144. +
  3145. + /* consolidate backward */
  3146. + if (!prev_inuse(p)) {
  3147. + prevsize = p->prev_size;
  3148. + size += prevsize;
  3149. + p = chunk_at_offset(p, -((long) prevsize));
  3150. + unlink(p, bck, fwd);
  3151. + }
  3152. +
  3153. + if (nextchunk != av->top) {
  3154. + /* get and clear inuse bit */
  3155. + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
  3156. + set_head(nextchunk, nextsize);
  3157. +
  3158. + /* consolidate forward */
  3159. + if (!nextinuse) {
  3160. + unlink(nextchunk, bck, fwd);
  3161. + size += nextsize;
  3162. + }
  3163. +
  3164. + /*
  3165. + Place the chunk in unsorted chunk list. Chunks are
  3166. + not placed into regular bins until after they have
  3167. + been given one chance to be used in malloc.
  3168. + */
  3169. +
  3170. + bck = unsorted_chunks(av);
  3171. + fwd = bck->fd;
  3172. + p->bk = bck;
  3173. + p->fd = fwd;
  3174. + bck->fd = p;
  3175. + fwd->bk = p;
  3176. +
  3177. + set_head(p, size | PREV_INUSE);
  3178. + set_foot(p, size);
  3179. +
  3180. + check_free_chunk(p);
  3181. + }
  3182. +
  3183. + /*
  3184. + If the chunk borders the current high end of memory,
  3185. + consolidate into top
  3186. + */
  3187. +
  3188. + else {
  3189. + size += nextsize;
  3190. + set_head(p, size | PREV_INUSE);
  3191. + av->top = p;
  3192. + check_chunk(p);
  3193. + }
  3194. +
  3195. + /*
  3196. + If freeing a large space, consolidate possibly-surrounding
  3197. + chunks. Then, if the total unused topmost memory exceeds trim
  3198. + threshold, ask malloc_trim to reduce top.
  3199. +
  3200. + Unless max_fast is 0, we don't know if there are fastbins
  3201. + bordering top, so we cannot tell for sure whether threshold
  3202. + has been reached unless fastbins are consolidated. But we
  3203. + don't want to consolidate on each free. As a compromise,
  3204. + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
  3205. + is reached.
  3206. + */
  3207. +
  3208. + if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
  3209. + if (have_fastchunks(av))
  3210. + malloc_consolidate(av);
  3211. +
  3212. +#ifndef MORECORE_CANNOT_TRIM
  3213. + if ((unsigned long)(chunksize(av->top)) >=
  3214. + (unsigned long)(av->trim_threshold))
  3215. + sYSTRIm(av->top_pad, av);
  3216. +#endif
  3217. + }
  3218. +
  3219. + }
  3220. + /*
  3221. + If the chunk was allocated via mmap, release via munmap()
  3222. + Note that if HAVE_MMAP is false but chunk_is_mmapped is
  3223. + true, then user must have overwritten memory. There's nothing
  3224. + we can do to catch this error unless DEBUG is set, in which case
  3225. + check_inuse_chunk (above) will have triggered an error.
  3226. + */
  3227. +
  3228. + else {
  3229. +#if HAVE_MMAP
  3230. + int ret;
  3231. + INTERNAL_SIZE_T offset = p->prev_size;
  3232. + av->n_mmaps--;
  3233. + av->mmapped_mem -= (size + offset);
  3234. + ret = munmap((char*)p - offset, size + offset);
  3235. + /* munmap returns non-zero on failure */
  3236. + assert(ret == 0);
  3237. +#endif
  3238. + }
  3239. + }
  3240. +}
  3241. +
  3242. +/*
  3243. + sysmalloc handles malloc cases requiring more memory from the system.
  3244. + On entry, it is assumed that av->top does not have enough
  3245. + space to service request for nb bytes, thus requiring that av->top
  3246. + be extended or replaced.
  3247. +*/
  3248. +
  3249. +INLINE
  3250. +#if __STD_C
  3251. +static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
  3252. +#else
  3253. +static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
  3254. +#endif
  3255. +{
  3256. + mchunkptr old_top; /* incoming value of av->top */
  3257. + INTERNAL_SIZE_T old_size; /* its size */
  3258. + char* old_end; /* its end address */
  3259. +
  3260. + long size; /* arg to first MORECORE or mmap call */
  3261. + char* brk; /* return value from MORECORE */
  3262. +
  3263. + long correction; /* arg to 2nd MORECORE call */
  3264. + char* snd_brk; /* 2nd return val */
  3265. +
  3266. + INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  3267. + INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
  3268. + char* aligned_brk; /* aligned offset into brk */
  3269. +
  3270. + mchunkptr p; /* the allocated/returned chunk */
  3271. + mchunkptr remainder; /* remainder from allocation */
  3272. + unsigned long remainder_size; /* its size */
  3273. +
  3274. + unsigned long sum; /* for updating stats */
  3275. +
  3276. + size_t pagemask = av->pagesize - 1;
  3277. +
  3278. +
  3279. +#if HAVE_MMAP
  3280. +
  3281. + /*
  3282. + If have mmap, and the request size meets the mmap threshold, and
  3283. + the system supports mmap, and there are few enough currently
  3284. + allocated mmapped regions, try to directly map this request
  3285. + rather than expanding top.
  3286. + */
  3287. +
  3288. + if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
  3289. + (av->n_mmaps < av->n_mmaps_max)) {
  3290. +
  3291. + char* mm; /* return value from mmap call*/
  3292. +
  3293. + /*
  3294. + Round up size to nearest page. For mmapped chunks, the overhead
  3295. + is one SIZE_SZ unit larger than for normal chunks, because there
  3296. + is no following chunk whose prev_size field could be used.
  3297. + */
  3298. + size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
  3299. +
  3300. + /* Don't try if size wraps around 0 */
  3301. + if ((unsigned long)(size) > (unsigned long)(nb)) {
  3302. +
  3303. + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  3304. +
  3305. + if (mm != (char*)(MORECORE_FAILURE)) {
  3306. +
  3307. + /*
  3308. + The offset to the start of the mmapped region is stored
  3309. + in the prev_size field of the chunk. This allows us to adjust
  3310. + returned start address to meet alignment requirements here
  3311. + and in memalign(), and still be able to compute proper
  3312. + address argument for later munmap in free() and realloc().
  3313. + */
  3314. +
  3315. + front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
  3316. + if (front_misalign > 0) {
  3317. + correction = MALLOC_ALIGNMENT - front_misalign;
  3318. + p = (mchunkptr)(mm + correction);
  3319. + p->prev_size = correction;
  3320. + set_head(p, (size - correction) |IS_MMAPPED);
  3321. + }
  3322. + else {
  3323. + p = (mchunkptr)mm;
  3324. + p->prev_size = 0;
  3325. + set_head(p, size|IS_MMAPPED);
  3326. + }
  3327. +
  3328. + /* update statistics */
  3329. +
  3330. + if (++av->n_mmaps > av->max_n_mmaps)
  3331. + av->max_n_mmaps = av->n_mmaps;
  3332. +
  3333. + sum = av->mmapped_mem += size;
  3334. + if (sum > (unsigned long)(av->max_mmapped_mem))
  3335. + av->max_mmapped_mem = sum;
  3336. + sum += av->sbrked_mem;
  3337. + if (sum > (unsigned long)(av->max_total_mem))
  3338. + av->max_total_mem = sum;
  3339. +
  3340. + check_chunk(p);
  3341. +
  3342. + return chunk2mem(p);
  3343. + }
  3344. + }
  3345. + }
  3346. +#endif
  3347. +
  3348. + /* Record incoming configuration of top */
  3349. +
  3350. + old_top = av->top;
  3351. + old_size = chunksize(old_top);
  3352. + old_end = (char*)(chunk_at_offset(old_top, old_size));
  3353. +
  3354. + brk = snd_brk = (char*)(MORECORE_FAILURE);
  3355. +
  3356. + /*
  3357. + If not the first time through, we require old_size to be
  3358. + at least MINSIZE and to have prev_inuse set.
  3359. + */
  3360. +
  3361. + assert((old_top == initial_top(av) && old_size == 0) ||
  3362. + ((unsigned long) (old_size) >= MINSIZE &&
  3363. + prev_inuse(old_top)));
  3364. +
  3365. + /* Precondition: not enough current space to satisfy nb request */
  3366. + assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
  3367. +
  3368. + /* Precondition: all fastbins are consolidated */
  3369. + assert(!have_fastchunks(av));
  3370. +
  3371. +
  3372. + /* Request enough space for nb + pad + overhead */
  3373. +
  3374. + size = nb + av->top_pad + MINSIZE;
  3375. +
  3376. + /*
  3377. + If contiguous, we can subtract out existing space that we hope to
  3378. + combine with new space. We add it back later only if
  3379. + we don't actually get contiguous space.
  3380. + */
  3381. +
  3382. + if (contiguous(av))
  3383. + size -= old_size;
  3384. +
  3385. + /*
  3386. + Round to a multiple of page size.
  3387. + If MORECORE is not contiguous, this ensures that we only call it
  3388. + with whole-page arguments. And if MORECORE is contiguous and
  3389. + this is not first time through, this preserves page-alignment of
  3390. + previous calls. Otherwise, we correct to page-align below.
  3391. + */
  3392. +
  3393. + size = (size + pagemask) & ~pagemask;
  3394. +
  3395. + /*
  3396. + Don't try to call MORECORE if argument is so big as to appear
  3397. + negative. Note that since mmap takes size_t arg, it may succeed
  3398. + below even if we cannot call MORECORE.
  3399. + */
  3400. +
  3401. + if (size > 0)
  3402. + brk = (char*)(MORECORE(size));
  3403. +
  3404. + /*
  3405. + If have mmap, try using it as a backup when MORECORE fails or
  3406. + cannot be used. This is worth doing on systems that have "holes" in
  3407. + address space, so sbrk cannot extend to give contiguous space, but
  3408. + space is available elsewhere. Note that we ignore mmap max count
  3409. + and threshold limits, since the space will not be used as a
  3410. + segregated mmap region.
  3411. + */
  3412. +
  3413. +#if HAVE_MMAP
  3414. + if (brk == (char*)(MORECORE_FAILURE)) {
  3415. +
  3416. + /* Cannot merge with old top, so add its size back in */
  3417. + if (contiguous(av))
  3418. + size = (size + old_size + pagemask) & ~pagemask;
  3419. +
  3420. + /* If we are relying on mmap as backup, then use larger units */
  3421. + if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
  3422. + size = MMAP_AS_MORECORE_SIZE;
  3423. +
  3424. + /* Don't try if size wraps around 0 */
  3425. + if ((unsigned long)(size) > (unsigned long)(nb)) {
  3426. +
  3427. + brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  3428. +
  3429. + if (brk != (char*)(MORECORE_FAILURE)) {
  3430. +
  3431. + /* We do not need, and cannot use, another sbrk call to find end */
  3432. + snd_brk = brk + size;
  3433. +
  3434. + /*
  3435. + Record that we no longer have a contiguous sbrk region.
  3436. + After the first time mmap is used as backup, we do not
  3437. + ever rely on contiguous space since this could incorrectly
  3438. + bridge regions.
  3439. + */
  3440. + set_noncontiguous(av);
  3441. + }
  3442. + }
  3443. + }
  3444. +#endif
  3445. +
  3446. + if (brk != (char*)(MORECORE_FAILURE)) {
  3447. + av->sbrked_mem += size;
  3448. +
  3449. + /*
  3450. + If MORECORE extends previous space, we can likewise extend top size.
  3451. + */
  3452. +
  3453. + if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
  3454. + set_head(old_top, (size + old_size) | PREV_INUSE);
  3455. + }
  3456. +
  3457. + /*
  3458. + Otherwise, make adjustments:
  3459. +
  3460. + * If the first time through or noncontiguous, we need to call sbrk
  3461. + just to find out where the end of memory lies.
  3462. +
  3463. + * We need to ensure that all returned chunks from malloc will meet
  3464. + MALLOC_ALIGNMENT
  3465. +
  3466. + * If there was an intervening foreign sbrk, we need to adjust sbrk
  3467. + request size to account for fact that we will not be able to
  3468. + combine new space with existing space in old_top.
  3469. +
  3470. + * Almost all systems internally allocate whole pages at a time, in
  3471. + which case we might as well use the whole last page of request.
  3472. + So we allocate enough more memory to hit a page boundary now,
  3473. + which in turn causes future contiguous calls to page-align.
  3474. + */
  3475. +
  3476. + else {
  3477. + front_misalign = 0;
  3478. + end_misalign = 0;
  3479. + correction = 0;
  3480. + aligned_brk = brk;
  3481. +
  3482. + /* handle contiguous cases */
  3483. + if (contiguous(av)) {
  3484. +
  3485. + /* Guarantee alignment of first new chunk made from this space */
  3486. +
  3487. + front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  3488. + if (front_misalign > 0) {
  3489. +
  3490. + /*
  3491. + Skip over some bytes to arrive at an aligned position.
  3492. + We don't need to specially mark these wasted front bytes.
  3493. + They will never be accessed anyway because
  3494. + prev_inuse of av->top (and any chunk created from its start)
  3495. + is always true after initialization.
  3496. + */
  3497. +
  3498. + correction = MALLOC_ALIGNMENT - front_misalign;
  3499. + aligned_brk += correction;
  3500. + }
  3501. +
  3502. + /*
  3503. + If this isn't adjacent to existing space, then we will not
  3504. + be able to merge with old_top space, so must add to 2nd request.
  3505. + */
  3506. +
  3507. + correction += old_size;
  3508. +
  3509. + /* Extend the end address to hit a page boundary */
  3510. + end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
  3511. + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
  3512. +
  3513. + assert(correction >= 0);
  3514. + snd_brk = (char*)(MORECORE(correction));
  3515. +
  3516. + /*
  3517. + If can't allocate correction, try to at least find out current
  3518. + brk. It might be enough to proceed without failing.
  3519. +
  3520. + Note that if second sbrk did NOT fail, we assume that space
  3521. + is contiguous with first sbrk. This is a safe assumption unless
  3522. + program is multithreaded but doesn't use locks and a foreign sbrk
  3523. + occurred between our first and second calls.
  3524. + */
  3525. +
  3526. + if (snd_brk == (char*)(MORECORE_FAILURE)) {
  3527. + correction = 0;
  3528. + snd_brk = (char*)(MORECORE(0));
  3529. + }
  3530. + }
  3531. +
  3532. + /* handle non-contiguous cases */
  3533. + else {
  3534. + /* MORECORE/mmap must correctly align */
  3535. + assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
  3536. +
  3537. + /* Find out current end of memory */
  3538. + if (snd_brk == (char*)(MORECORE_FAILURE)) {
  3539. + snd_brk = (char*)(MORECORE(0));
  3540. + }
  3541. + }
  3542. +
  3543. + /* Adjust top based on results of second sbrk */
  3544. + if (snd_brk != (char*)(MORECORE_FAILURE)) {
  3545. + av->top = (mchunkptr)aligned_brk;
  3546. + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
  3547. + av->sbrked_mem += correction;
  3548. +
  3549. + /*
  3550. + If not the first time through, we either have a
  3551. + gap due to foreign sbrk or a non-contiguous region. Insert a
  3552. + double fencepost at old_top to prevent consolidation with space
  3553. + we don't own. These fenceposts are artificial chunks that are
  3554. + marked as inuse and are in any case too small to use. We need
  3555. + two to make sizes and alignments work out.
  3556. + */
  3557. +
  3558. + if (old_size != 0) {
  3559. + /*
  3560. + Shrink old_top to insert fenceposts, keeping size a
  3561. + multiple of MALLOC_ALIGNMENT. We know there is at least
  3562. + enough space in old_top to do this.
  3563. + */
  3564. + old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
  3565. + set_head(old_top, old_size | PREV_INUSE);
  3566. +
  3567. + /*
  3568. + Note that the following assignments completely overwrite
  3569. + old_top when old_size was previously MINSIZE. This is
  3570. + intentional. We need the fencepost, even if old_top otherwise gets
  3571. + lost.
  3572. + */
  3573. + chunk_at_offset(old_top, old_size )->size =
  3574. + SIZE_SZ|PREV_INUSE;
  3575. +
  3576. + chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
  3577. + SIZE_SZ|PREV_INUSE;
  3578. +
  3579. + /* If possible, release the rest. */
  3580. + if (old_size >= MINSIZE) {
  3581. + fREe(chunk2mem(old_top));
  3582. + }
  3583. +
  3584. + }
  3585. + }
  3586. + }
  3587. +
  3588. + /* Update statistics */
  3589. + sum = av->sbrked_mem;
  3590. + if (sum > (unsigned long)(av->max_sbrked_mem))
  3591. + av->max_sbrked_mem = sum;
  3592. +
  3593. + sum += av->mmapped_mem;
  3594. + if (sum > (unsigned long)(av->max_total_mem))
  3595. + av->max_total_mem = sum;
  3596. +
  3597. + check_malloc_state();
  3598. +
  3599. + /* finally, do the allocation */
  3600. + p = av->top;
  3601. + size = chunksize(p);
  3602. +
  3603. + /* check that one of the above allocation paths succeeded */
  3604. + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
  3605. + remainder_size = size - nb;
  3606. + remainder = chunk_at_offset(p, nb);
  3607. + av->top = remainder;
  3608. + set_head(p, nb | PREV_INUSE);
  3609. + set_head(remainder, remainder_size | PREV_INUSE);
  3610. + check_malloced_chunk(p, nb);
  3611. + return chunk2mem(p);
  3612. + }
  3613. + }
  3614. +
  3615. + /* catch all failure paths */
  3616. + MALLOC_FAILURE_ACTION;
  3617. + return 0;
  3618. +}
  3619. +
  3620. +
  3621. +/*
  3622. + ------------------------------ malloc ------------------------------
  3623. +*/
  3624. +
  3625. +INLINE
  3626. +#if __STD_C
  3627. +Void_t* mALLOc(size_t bytes)
  3628. +#else
  3629. + Void_t* mALLOc(bytes) size_t bytes;
  3630. +#endif
  3631. +{
  3632. + mstate av = get_malloc_state();
  3633. +
  3634. + INTERNAL_SIZE_T nb; /* normalized request size */
  3635. + unsigned int idx; /* associated bin index */
  3636. + mbinptr bin; /* associated bin */
  3637. + mfastbinptr* fb; /* associated fastbin */
  3638. +
  3639. + mchunkptr victim; /* inspected/selected chunk */
  3640. + INTERNAL_SIZE_T size; /* its size */
  3641. + int victim_index; /* its bin index */
  3642. +
  3643. + mchunkptr remainder; /* remainder from a split */
  3644. + unsigned long remainder_size; /* its size */
  3645. +
  3646. + unsigned int block; /* bit map traverser */
  3647. + unsigned int bit; /* bit map traverser */
  3648. + unsigned int map; /* current word of binmap */
  3649. +
  3650. + mchunkptr fwd; /* misc temp for linking */
  3651. + mchunkptr bck; /* misc temp for linking */
  3652. +
  3653. + /*
  3654. + Convert request size to internal form by adding SIZE_SZ bytes
  3655. + overhead plus possibly more to obtain necessary alignment and/or
  3656. + to obtain a size of at least MINSIZE, the smallest allocatable
  3657. + size. Also, checked_request2size traps (returning 0) request sizes
  3658. + that are so large that they wrap around zero when padded and
  3659. + aligned.
  3660. + */
  3661. +
  3662. + checked_request2size(bytes, nb);
  3663. +
  3664. + /*
  3665. + If the size qualifies as a fastbin, first check corresponding bin.
  3666. + This code is safe to execute even if av is not yet initialized, so we
  3667. + can try it without checking, which saves some time on this fast path.
  3668. + */
  3669. +
  3670. + if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
  3671. + fb = &(av->fastbins[(fastbin_index(nb))]);
  3672. + if ( (victim = *fb) != 0) {
  3673. + *fb = victim->fd;
  3674. + check_remalloced_chunk(victim, nb);
  3675. + return chunk2mem(victim);
  3676. + }
  3677. + }
  3678. +
  3679. + /*
  3680. + If a small request, check regular bin. Since these "smallbins"
  3681. + hold one size each, no searching within bins is necessary.
  3682. + (For a large request, we need to wait until unsorted chunks are
  3683. + processed to find best fit. But for small ones, fits are exact
  3684. + anyway, so we can check now, which is faster.)
  3685. + */
  3686. +
  3687. + if (in_smallbin_range(nb)) {
  3688. + idx = smallbin_index(nb);
  3689. + bin = bin_at(av,idx);
  3690. +
  3691. + if ( (victim = last(bin)) != bin) {
  3692. + if (victim == 0) /* initialization check */
  3693. + malloc_consolidate(av);
  3694. + else {
  3695. + bck = victim->bk;
  3696. + set_inuse_bit_at_offset(victim, nb);
  3697. + bin->bk = bck;
  3698. + bck->fd = bin;
  3699. +
  3700. + check_malloced_chunk(victim, nb);
  3701. + return chunk2mem(victim);
  3702. + }
  3703. + }
  3704. + }
  3705. +
  3706. + /*
  3707. + If this is a large request, consolidate fastbins before continuing.
  3708. + While it might look excessive to kill all fastbins before
  3709. + even seeing if there is space available, this avoids
  3710. + fragmentation problems normally associated with fastbins.
  3711. + Also, in practice, programs tend to have runs of either small or
  3712. + large requests, but less often mixtures, so consolidation is not
  3713. + invoked all that often in most programs. And the programs in which
  3714. + it is called frequently otherwise tend to fragment.
  3715. + */
  3716. +
  3717. + else {
  3718. + idx = largebin_index(nb);
  3719. + if (have_fastchunks(av))
  3720. + malloc_consolidate(av);
  3721. + }
  3722. +
  3723. + /*
  3724. + Process recently freed or remaindered chunks, taking one only if
  3725. + it is an exact fit, or, if this is a small request, the chunk is the remainder from
  3726. + the most recent non-exact fit. Place other traversed chunks in
  3727. + bins. Note that this step is the only place in any routine where
  3728. + chunks are placed in bins.
  3729. +
  3730. + The outer loop here is needed because we might not realize until
  3731. + near the end of malloc that we should have consolidated, so must
  3732. + do so and retry. This happens at most once, and only when we would
  3733. + otherwise need to expand memory to service a "small" request.
  3734. + */
  3735. +
  3736. + for(;;) {
  3737. +
  3738. + while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
  3739. + bck = victim->bk;
  3740. + size = chunksize(victim);
  3741. +
  3742. + /*
  3743. + If a small request, try to use last remainder if it is the
  3744. + only chunk in unsorted bin. This helps promote locality for
  3745. + runs of consecutive small requests. This is the only
  3746. + exception to best-fit, and applies only when there is
  3747. + no exact fit for a small chunk.
  3748. + */
  3749. +
  3750. + if (in_smallbin_range(nb) &&
  3751. + bck == unsorted_chunks(av) &&
  3752. + victim == av->last_remainder &&
  3753. + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
  3754. +
  3755. + /* split and reattach remainder */
  3756. + remainder_size = size - nb;
  3757. + remainder = chunk_at_offset(victim, nb);
  3758. + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  3759. + av->last_remainder = remainder;
  3760. + remainder->bk = remainder->fd = unsorted_chunks(av);
  3761. +
  3762. + set_head(victim, nb | PREV_INUSE);
  3763. + set_head(remainder, remainder_size | PREV_INUSE);
  3764. + set_foot(remainder, remainder_size);
  3765. +
  3766. + check_malloced_chunk(victim, nb);
  3767. + return chunk2mem(victim);
  3768. + }
  3769. +
  3770. + /* remove from unsorted list */
  3771. + unsorted_chunks(av)->bk = bck;
  3772. + bck->fd = unsorted_chunks(av);
  3773. +
  3774. + /* Take now instead of binning if exact fit */
  3775. +
  3776. + if (size == nb) {
  3777. + set_inuse_bit_at_offset(victim, size);
  3778. + check_malloced_chunk(victim, nb);
  3779. + return chunk2mem(victim);
  3780. + }
  3781. +
  3782. + /* place chunk in bin */
  3783. +
  3784. + if (in_smallbin_range(size)) {
  3785. + victim_index = smallbin_index(size);
  3786. + bck = bin_at(av, victim_index);
  3787. + fwd = bck->fd;
  3788. + }
  3789. + else {
  3790. + victim_index = largebin_index(size);
  3791. + bck = bin_at(av, victim_index);
  3792. + fwd = bck->fd;
  3793. +
  3794. + /* maintain large bins in sorted order */
  3795. + if (fwd != bck) {
  3796. + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
  3797. + /* if smaller than smallest, bypass loop below */
  3798. + if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
  3799. + fwd = bck;
  3800. + bck = bck->bk;
  3801. + }
  3802. + else {
  3803. + while ((unsigned long)(size) < (unsigned long)(fwd->size))
  3804. + fwd = fwd->fd;
  3805. + bck = fwd->bk;
  3806. + }
  3807. + }
  3808. + }
  3809. +
  3810. + mark_bin(av, victim_index);
  3811. + victim->bk = bck;
  3812. + victim->fd = fwd;
  3813. + fwd->bk = victim;
  3814. + bck->fd = victim;
  3815. + }
  3816. +
  3817. + /*
  3818. + If a large request, scan through the chunks of current bin in
  3819. + sorted order to find smallest that fits. This is the only step
  3820. + where an unbounded number of chunks might be scanned without doing
  3821. + anything useful with them. However the lists tend to be short.
  3822. + */
  3823. +
  3824. + if (!in_smallbin_range(nb)) {
  3825. + bin = bin_at(av, idx);
  3826. +
  3827. + /* skip scan if empty or largest chunk is too small */
  3828. + if ((victim = last(bin)) != bin &&
  3829. + (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
  3830. +
  3831. + while (((unsigned long)(size = chunksize(victim)) <
  3832. + (unsigned long)(nb)))
  3833. + victim = victim->bk;
  3834. +
  3835. + remainder_size = size - nb;
  3836. + unlink(victim, bck, fwd);
  3837. +
  3838. + /* Exhaust */
  3839. + if (remainder_size < MINSIZE) {
  3840. + set_inuse_bit_at_offset(victim, size);
  3841. + check_malloced_chunk(victim, nb);
  3842. + return chunk2mem(victim);
  3843. + }
  3844. + /* Split */
  3845. + else {
  3846. + remainder = chunk_at_offset(victim, nb);
  3847. + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  3848. + remainder->bk = remainder->fd = unsorted_chunks(av);
  3849. + set_head(victim, nb | PREV_INUSE);
  3850. + set_head(remainder, remainder_size | PREV_INUSE);
  3851. + set_foot(remainder, remainder_size);
  3852. + check_malloced_chunk(victim, nb);
  3853. + return chunk2mem(victim);
  3854. + }
  3855. + }
  3856. + }
  3857. +
  3858. + /*
  3859. + Search for a chunk by scanning bins, starting with next largest
  3860. + bin. This search is strictly by best-fit; i.e., the smallest
  3861. + (with ties going to approximately the least recently used) chunk
  3862. + that fits is selected.
  3863. +
  3864. + The bitmap avoids needing to check that most blocks are nonempty.
  3865. + The particular case of skipping all bins during warm-up phases
  3866. + when no chunks have been returned yet is faster than it might look.
  3867. + */
  3868. +
  3869. + ++idx;
  3870. + bin = bin_at(av,idx);
  3871. + block = idx2block(idx);
  3872. + map = av->binmap[block];
  3873. + bit = idx2bit(idx);
  3874. +
  3875. + for (;;) {
  3876. +
  3877. + /* Skip rest of block if there are no more set bits in this block. */
  3878. + if (bit > map || bit == 0) {
  3879. + do {
  3880. + if (++block >= BINMAPSIZE) /* out of bins */
  3881. + goto use_top;
  3882. + } while ( (map = av->binmap[block]) == 0);
  3883. +
  3884. + bin = bin_at(av, (block << BINMAPSHIFT));
  3885. + bit = 1;
  3886. + }
  3887. +
  3888. + /* Advance to bin with set bit. There must be one. */
  3889. + while ((bit & map) == 0) {
  3890. + bin = next_bin(bin);
  3891. + bit <<= 1;
  3892. + assert(bit != 0);
  3893. + }
  3894. +
  3895. + /* Inspect the bin. It is likely to be non-empty */
  3896. + victim = last(bin);
  3897. +
  3898. + /* If a false alarm (empty bin), clear the bit. */
  3899. + if (victim == bin) {
  3900. + av->binmap[block] = map &= ~bit; /* Write through */
  3901. + bin = next_bin(bin);
  3902. + bit <<= 1;
  3903. + }
  3904. +
  3905. + else {
  3906. + size = chunksize(victim);
  3907. +
  3908. + /* We know the first chunk in this bin is big enough to use. */
  3909. + assert((unsigned long)(size) >= (unsigned long)(nb));
  3910. +
  3911. + remainder_size = size - nb;
  3912. +
  3913. + /* unlink */
  3914. + bck = victim->bk;
  3915. + bin->bk = bck;
  3916. + bck->fd = bin;
  3917. +
  3918. + /* Exhaust */
  3919. + if (remainder_size < MINSIZE) {
  3920. + set_inuse_bit_at_offset(victim, size);
  3921. + check_malloced_chunk(victim, nb);
  3922. + return chunk2mem(victim);
  3923. + }
  3924. +
  3925. + /* Split */
  3926. + else {
  3927. + remainder = chunk_at_offset(victim, nb);
  3928. +
  3929. + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  3930. + remainder->bk = remainder->fd = unsorted_chunks(av);
  3931. + /* advertise as last remainder */
  3932. + if (in_smallbin_range(nb))
  3933. + av->last_remainder = remainder;
  3934. +
  3935. + set_head(victim, nb | PREV_INUSE);
  3936. + set_head(remainder, remainder_size | PREV_INUSE);
  3937. + set_foot(remainder, remainder_size);
  3938. + check_malloced_chunk(victim, nb);
  3939. + return chunk2mem(victim);
  3940. + }
  3941. + }
  3942. + }
  3943. +
  3944. + use_top:
  3945. + /*
  3946. + If large enough, split off the chunk bordering the end of memory
  3947. + (held in av->top). Note that this is in accord with the best-fit
  3948. + search rule. In effect, av->top is treated as larger (and thus
  3949. + less well fitting) than any other available chunk since it can
  3950. + be extended to be as large as necessary (up to system
  3951. + limitations).
  3952. +
  3953. + We require that av->top always exists (i.e., has size >=
  3954. + MINSIZE) after initialization, so if it would otherwise be
  3955. + exhausted by current request, it is replenished. (The main
  3956. + reason for ensuring it exists is that we may need MINSIZE space
  3957. + to put in fenceposts in sysmalloc.)
  3958. + */
  3959. +
  3960. + victim = av->top;
  3961. + size = chunksize(victim);
  3962. +
  3963. + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
  3964. + remainder_size = size - nb;
  3965. + remainder = chunk_at_offset(victim, nb);
  3966. + av->top = remainder;
  3967. + set_head(victim, nb | PREV_INUSE);
  3968. + set_head(remainder, remainder_size | PREV_INUSE);
  3969. +
  3970. + check_malloced_chunk(victim, nb);
  3971. + return chunk2mem(victim);
  3972. + }
  3973. +
  3974. + /*
  3975. + If there is space available in fastbins, consolidate and retry,
  3976. + to possibly avoid expanding memory. This can occur only if nb is
  3977. + in smallbin range so we didn't consolidate upon entry.
  3978. + */
  3979. +
  3980. + else if (have_fastchunks(av)) {
  3981. + assert(in_smallbin_range(nb));
  3982. + malloc_consolidate(av);
  3983. + idx = smallbin_index(nb); /* restore original bin index */
  3984. + }
  3985. +
  3986. + /*
  3987. + Otherwise, relay to handle system-dependent cases
  3988. + */
  3989. + else
  3990. + return sYSMALLOc(nb, av);
  3991. + }
  3992. +}
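The comment above the final bin scan describes how a bitmap (one bit per bin, packed into the words of av->binmap) lets whole blocks of empty bins be skipped with a single word test. The following is a minimal, self-contained sketch of that idea only; the fixed sizes and names such as next_nonempty_bin are illustrative assumptions and are not part of the patch.

    #include <assert.h>
    #include <stdio.h>

    #define NBINS      128
    #define BITSPERMAP 32
    #define BINMAPSIZE (NBINS / BITSPERMAP)

    static unsigned int binmap[BINMAPSIZE];   /* one bit per bin */

    static void mark_bin(int idx)  { binmap[idx / BITSPERMAP] |=  (1U << (idx % BITSPERMAP)); }
    static void clear_bin(int idx) { binmap[idx / BITSPERMAP] &= ~(1U << (idx % BITSPERMAP)); }

    /* Find the first marked bin at or above idx, or -1 if none. */
    static int next_nonempty_bin(int idx)
    {
        int block = idx / BITSPERMAP;
        unsigned int bit = 1U << (idx % BITSPERMAP);
        unsigned int map = binmap[block];

        /* No set bits left at or above this position: skip whole 32-bin blocks. */
        if (bit > map || bit == 0) {
            do {
                if (++block >= BINMAPSIZE)
                    return -1;                 /* out of bins */
            } while ((map = binmap[block]) == 0);
            idx = block * BITSPERMAP;
            bit = 1;
        }

        /* Advance within the word to a set bit; there must be one. */
        while ((bit & map) == 0) {
            bit <<= 1;
            ++idx;
            assert(bit != 0);
        }
        return idx;
    }

    int main(void)
    {
        mark_bin(70);
        mark_bin(97);
        printf("%d\n", next_nonempty_bin(3));  /* prints 70 */
        clear_bin(70);
        printf("%d\n", next_nonempty_bin(3));  /* prints 97 */
        return 0;
    }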
  3993. +
  3994. +/*
  3995. + ------------------------------ realloc ------------------------------
  3996. +*/
  3997. +
  3998. +
  3999. +INLINE
  4000. +#if __STD_C
  4001. +Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
  4002. +#else
  4003. +Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
  4004. +#endif
  4005. +{
  4006. + mstate av = get_malloc_state();
  4007. +
  4008. + INTERNAL_SIZE_T nb; /* padded request size */
  4009. +
  4010. + mchunkptr oldp; /* chunk corresponding to oldmem */
  4011. + INTERNAL_SIZE_T oldsize; /* its size */
  4012. +
  4013. + mchunkptr newp; /* chunk to return */
  4014. + INTERNAL_SIZE_T newsize; /* its size */
  4015. + Void_t* newmem; /* corresponding user mem */
  4016. +
  4017. + mchunkptr next; /* next contiguous chunk after oldp */
  4018. +
  4019. + mchunkptr remainder; /* extra space at end of newp */
  4020. + unsigned long remainder_size; /* its size */
  4021. +
  4022. + mchunkptr bck; /* misc temp for linking */
  4023. + mchunkptr fwd; /* misc temp for linking */
  4024. +
  4025. + unsigned long copysize; /* bytes to copy */
  4026. + unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
  4027. + INTERNAL_SIZE_T* s; /* copy source */
  4028. + INTERNAL_SIZE_T* d; /* copy destination */
  4029. +
  4030. +
  4031. +#ifdef REALLOC_ZERO_BYTES_FREES
  4032. + if (bytes == 0) {
  4033. + fREe(oldmem);
  4034. + return 0;
  4035. + }
  4036. +#endif
  4037. +
  4038. + /* realloc of null is supposed to be same as malloc */
  4039. + if (oldmem == 0) return mALLOc(bytes);
  4040. +
  4041. + checked_request2size(bytes, nb);
  4042. +
  4043. + oldp = mem2chunk(oldmem);
  4044. + oldsize = chunksize(oldp);
  4045. +
  4046. + check_inuse_chunk(oldp);
  4047. +
  4048. + if (!chunk_is_mmapped(oldp)) {
  4049. +
  4050. + if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
  4051. + /* already big enough; split below */
  4052. + newp = oldp;
  4053. + newsize = oldsize;
  4054. + }
  4055. +
  4056. + else {
  4057. + next = chunk_at_offset(oldp, oldsize);
  4058. +
  4059. + /* Try to expand forward into top */
  4060. + if (next == av->top &&
  4061. + (unsigned long)(newsize = oldsize + chunksize(next)) >=
  4062. + (unsigned long)(nb + MINSIZE)) {
  4063. + set_head_size(oldp, nb);
  4064. + av->top = chunk_at_offset(oldp, nb);
  4065. + set_head(av->top, (newsize - nb) | PREV_INUSE);
  4066. + return chunk2mem(oldp);
  4067. + }
  4068. +
  4069. + /* Try to expand forward into next chunk; split off remainder below */
  4070. + else if (next != av->top &&
  4071. + !inuse(next) &&
  4072. + (unsigned long)(newsize = oldsize + chunksize(next)) >=
  4073. + (unsigned long)(nb)) {
  4074. + newp = oldp;
  4075. + unlink(next, bck, fwd);
  4076. + }
  4077. +
  4078. + /* allocate, copy, free */
  4079. + else {
  4080. + newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
  4081. + if (newmem == 0)
  4082. + return 0; /* propagate failure */
  4083. +
  4084. + newp = mem2chunk(newmem);
  4085. + newsize = chunksize(newp);
  4086. +
  4087. + /*
  4088. + Avoid copy if newp is next chunk after oldp.
  4089. + */
  4090. + if (newp == next) {
  4091. + newsize += oldsize;
  4092. + newp = oldp;
  4093. + }
  4094. + else {
  4095. + /*
  4096. + Unroll copy of <= 36 bytes (72 if 8-byte sizes)
  4097. + We know that contents have an odd number of
  4098. + INTERNAL_SIZE_T-sized words; minimally 3.
  4099. + */
  4100. +
  4101. + copysize = oldsize - SIZE_SZ;
  4102. + s = (INTERNAL_SIZE_T*)(oldmem);
  4103. + d = (INTERNAL_SIZE_T*)(newmem);
  4104. + ncopies = copysize / sizeof(INTERNAL_SIZE_T);
  4105. + assert(ncopies >= 3);
  4106. +
  4107. + if (ncopies > 9)
  4108. + MALLOC_COPY(d, s, copysize);
  4109. +
  4110. + else {
  4111. + *(d+0) = *(s+0);
  4112. + *(d+1) = *(s+1);
  4113. + *(d+2) = *(s+2);
  4114. + if (ncopies > 4) {
  4115. + *(d+3) = *(s+3);
  4116. + *(d+4) = *(s+4);
  4117. + if (ncopies > 6) {
  4118. + *(d+5) = *(s+5);
  4119. + *(d+6) = *(s+6);
  4120. + if (ncopies > 8) {
  4121. + *(d+7) = *(s+7);
  4122. + *(d+8) = *(s+8);
  4123. + }
  4124. + }
  4125. + }
  4126. + }
  4127. +
  4128. + fREe(oldmem);
  4129. + check_inuse_chunk(newp);
  4130. + return chunk2mem(newp);
  4131. + }
  4132. + }
  4133. + }
  4134. +
  4135. + /* If possible, free extra space in old or extended chunk */
  4136. +
  4137. + assert((unsigned long)(newsize) >= (unsigned long)(nb));
  4138. +
  4139. + remainder_size = newsize - nb;
  4140. +
  4141. + if (remainder_size < MINSIZE) { /* not enough extra to split off */
  4142. + set_head_size(newp, newsize);
  4143. + set_inuse_bit_at_offset(newp, newsize);
  4144. + }
  4145. + else { /* split remainder */
  4146. + remainder = chunk_at_offset(newp, nb);
  4147. + set_head_size(newp, nb);
  4148. + set_head(remainder, remainder_size | PREV_INUSE);
  4149. + /* Mark remainder as inuse so free() won't complain */
  4150. + set_inuse_bit_at_offset(remainder, remainder_size);
  4151. + fREe(chunk2mem(remainder));
  4152. + }
  4153. +
  4154. + check_inuse_chunk(newp);
  4155. + return chunk2mem(newp);
  4156. + }
  4157. +
  4158. + /*
  4159. + Handle mmap cases
  4160. + */
  4161. +
  4162. + else {
  4163. +#if HAVE_MMAP
  4164. +
  4165. +#if HAVE_MREMAP
  4166. + INTERNAL_SIZE_T offset = oldp->prev_size;
  4167. + size_t pagemask = av->pagesize - 1;
  4168. + char *cp;
  4169. + unsigned long sum;
  4170. +
  4171. + /* Note the extra SIZE_SZ overhead */
  4172. + newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
  4173. +
  4174. + /* don't need to remap if still within same page */
  4175. + if (oldsize == newsize - offset)
  4176. + return oldmem;
  4177. +
  4178. + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
  4179. +
  4180. + if (cp != (char*)MORECORE_FAILURE) {
  4181. +
  4182. + newp = (mchunkptr)(cp + offset);
  4183. + set_head(newp, (newsize - offset)|IS_MMAPPED);
  4184. +
  4185. + assert(aligned_OK(chunk2mem(newp)));
  4186. + assert((newp->prev_size == offset));
  4187. +
  4188. + /* update statistics */
  4189. + sum = av->mmapped_mem += newsize - oldsize;
  4190. + if (sum > (unsigned long)(av->max_mmapped_mem))
  4191. + av->max_mmapped_mem = sum;
  4192. + sum += av->sbrked_mem;
  4193. + if (sum > (unsigned long)(av->max_total_mem))
  4194. + av->max_total_mem = sum;
  4195. +
  4196. + return chunk2mem(newp);
  4197. + }
  4198. +#endif
  4199. +
  4200. + /* Note the extra SIZE_SZ overhead. */
  4201. + if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
  4202. + newmem = oldmem; /* do nothing */
  4203. + else {
  4204. + /* Must alloc, copy, free. */
  4205. + newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
  4206. + if (newmem != 0) {
  4207. + MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
  4208. + fREe(oldmem);
  4209. + }
  4210. + }
  4211. + return newmem;
  4212. +
  4213. +#else
  4214. + /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
  4215. + check_malloc_state();
  4216. + MALLOC_FAILURE_ACTION;
  4217. + return 0;
  4218. +#endif
  4219. + }
  4220. +}
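rEALLOc above unrolls the copy whenever the payload is at most 9 INTERNAL_SIZE_T words (the payload word count is always odd, minimally 3) and otherwise defers to MALLOC_COPY; cALLOc further below clears memory with the same pattern. Here is a standalone sketch of that pattern, using size_t and memcpy as stand-ins; it assumes the same odd-word precondition and is not the allocator's own routine.

    #include <string.h>
    #include <stddef.h>

    /* nwords is assumed odd and >= 3, mirroring the allocator's invariant that
       a chunk payload is an odd number of size words.  Illustrative only. */
    static void copy_words(size_t *d, const size_t *s, size_t nwords)
    {
        if (nwords > 9) {                  /* large block: defer to the bulk routine */
            memcpy(d, s, nwords * sizeof(size_t));
            return;
        }
        d[0] = s[0]; d[1] = s[1]; d[2] = s[2];   /* always at least 3 words */
        if (nwords > 4) {
            d[3] = s[3]; d[4] = s[4];
            if (nwords > 6) {
                d[5] = s[5]; d[6] = s[6];
                if (nwords > 8) {
                    d[7] = s[7]; d[8] = s[8];
                }
            }
        }
    }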
  4221. +
  4222. +/*
  4223. + ------------------------------ memalign ------------------------------
  4224. +*/
  4225. +
  4226. +INLINE
  4227. +#if __STD_C
  4228. +Void_t* mEMALIGn(size_t alignment, size_t bytes)
  4229. +#else
  4230. +Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
  4231. +#endif
  4232. +{
  4233. + INTERNAL_SIZE_T nb; /* padded request size */
  4234. + char* m; /* memory returned by malloc call */
  4235. + mchunkptr p; /* corresponding chunk */
  4236. + char* brk; /* alignment point within p */
  4237. + mchunkptr newp; /* chunk to return */
  4238. + INTERNAL_SIZE_T newsize; /* its size */
  4239. + INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
  4240. + mchunkptr remainder; /* spare room at end to split off */
  4241. + unsigned long remainder_size; /* its size */
  4242. + INTERNAL_SIZE_T size;
  4243. +
  4244. + /* If need less alignment than we give anyway, just relay to malloc */
  4245. +
  4246. + if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
  4247. +
  4248. + /* Otherwise, ensure that it is at least a minimum chunk size */
  4249. +
  4250. + if (alignment < MINSIZE) alignment = MINSIZE;
  4251. +
  4252. + /* Make sure alignment is power of 2 (in case MINSIZE is not). */
  4253. + if ((alignment & (alignment - 1)) != 0) {
  4254. + size_t a = MALLOC_ALIGNMENT * 2;
  4255. + while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
  4256. + alignment = a;
  4257. + }
  4258. +
  4259. + checked_request2size(bytes, nb);
  4260. +
  4261. + /*
  4262. + Strategy: find a spot within that chunk that meets the alignment
  4263. + request, and then possibly free the leading and trailing space.
  4264. + */
  4265. +
  4266. +
  4267. + /* Call malloc with worst case padding to hit alignment. */
  4268. +
  4269. + m = (char*)(mALLOc(nb + alignment + MINSIZE));
  4270. +
  4271. + if (m == 0) return 0; /* propagate failure */
  4272. +
  4273. + p = mem2chunk(m);
  4274. +
  4275. + if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
  4276. +
  4277. + /*
  4278. + Find an aligned spot inside chunk. Since we need to give back
  4279. + leading space in a chunk of at least MINSIZE, if the first
  4280. + calculation places us at a spot with less than MINSIZE leader,
  4281. + we can move to the next aligned spot -- we've allocated enough
  4282. + total room so that this is always possible.
  4283. + */
  4284. +
  4285. + brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
  4286. + -((signed long) alignment));
  4287. + if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
  4288. + brk += alignment;
  4289. +
  4290. + newp = (mchunkptr)brk;
  4291. + leadsize = brk - (char*)(p);
  4292. + newsize = chunksize(p) - leadsize;
  4293. +
  4294. + /* For mmapped chunks, just adjust offset */
  4295. + if (chunk_is_mmapped(p)) {
  4296. + newp->prev_size = p->prev_size + leadsize;
  4297. + set_head(newp, newsize|IS_MMAPPED);
  4298. + return chunk2mem(newp);
  4299. + }
  4300. +
  4301. + /* Otherwise, give back leader, use the rest */
  4302. + set_head(newp, newsize | PREV_INUSE);
  4303. + set_inuse_bit_at_offset(newp, newsize);
  4304. + set_head_size(p, leadsize);
  4305. + fREe(chunk2mem(p));
  4306. + p = newp;
  4307. +
  4308. + assert (newsize >= nb &&
  4309. + (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  4310. + }
  4311. +
  4312. + /* Also give back spare room at the end */
  4313. + if (!chunk_is_mmapped(p)) {
  4314. + size = chunksize(p);
  4315. + if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
  4316. + remainder_size = size - nb;
  4317. + remainder = chunk_at_offset(p, nb);
  4318. + set_head(remainder, remainder_size | PREV_INUSE);
  4319. + set_head_size(p, nb);
  4320. + fREe(chunk2mem(remainder));
  4321. + }
  4322. + }
  4323. +
  4324. + check_inuse_chunk(p);
  4325. + return chunk2mem(p);
  4326. +}
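The core of mEMALIGn above is over-allocating with enough slack that some address inside the block is aligned, then rounding up to that address; the allocator then gives the leading and trailing slack back as separate chunks. The sketch below shows only the over-allocation and rounding arithmetic (the caller keeps the raw pointer for free()), so it illustrates the address math, not the chunk surgery; the name aligned_alloc_sketch is an assumption for illustration.

    #include <stdint.h>
    #include <stdlib.h>

    /* alignment must be a power of two.  Returns an aligned pointer inside a
       malloc'ed block; *raw_out receives the pointer to pass to free() later. */
    static void *aligned_alloc_sketch(size_t alignment, size_t bytes, void **raw_out)
    {
        void *raw = malloc(bytes + alignment - 1);   /* worst-case padding */
        uintptr_t p;

        if (raw == NULL)
            return NULL;
        p = ((uintptr_t)raw + alignment - 1) & ~(uintptr_t)(alignment - 1);
        *raw_out = raw;
        return (void *)p;
    }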
  4327. +
  4328. +/*
  4329. + ------------------------------ calloc ------------------------------
  4330. +*/
  4331. +
  4332. +INLINE
  4333. +#if __STD_C
  4334. +Void_t* cALLOc(size_t n_elements, size_t elem_size)
  4335. +#else
  4336. +Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
  4337. +#endif
  4338. +{
  4339. + mchunkptr p;
  4340. + unsigned long clearsize;
  4341. + unsigned long nclears;
  4342. + INTERNAL_SIZE_T* d;
  4343. +
  4344. + Void_t* mem = mALLOc(n_elements * elem_size);
  4345. +
  4346. + if (mem != 0) {
  4347. + p = mem2chunk(mem);
  4348. +
  4349. + if (!chunk_is_mmapped(p))
  4350. + {
  4351. + /*
  4352. + Unroll clear of <= 36 bytes (72 if 8-byte sizes)
  4353. + We know that contents have an odd number of
  4354. + INTERNAL_SIZE_T-sized words; minimally 3.
  4355. + */
  4356. +
  4357. + d = (INTERNAL_SIZE_T*)mem;
  4358. + clearsize = chunksize(p) - SIZE_SZ;
  4359. + nclears = clearsize / sizeof(INTERNAL_SIZE_T);
  4360. + assert(nclears >= 3);
  4361. +
  4362. + if (nclears > 9)
  4363. + MALLOC_ZERO(d, clearsize);
  4364. +
  4365. + else {
  4366. + *(d+0) = 0;
  4367. + *(d+1) = 0;
  4368. + *(d+2) = 0;
  4369. + if (nclears > 4) {
  4370. + *(d+3) = 0;
  4371. + *(d+4) = 0;
  4372. + if (nclears > 6) {
  4373. + *(d+5) = 0;
  4374. + *(d+6) = 0;
  4375. + if (nclears > 8) {
  4376. + *(d+7) = 0;
  4377. + *(d+8) = 0;
  4378. + }
  4379. + }
  4380. + }
  4381. + }
  4382. + }
  4383. +#if ! MMAP_CLEARS
  4384. + else
  4385. + {
  4386. + d = (INTERNAL_SIZE_T*)mem;
  4387. + clearsize = chunksize(p) - 2 * SIZE_SZ;
  4388. + MALLOC_ZERO(d, clearsize);
  4389. + }
  4390. +#endif
  4391. + }
  4392. + return mem;
  4393. +}
  4394. +
  4395. +/*
  4396. + ------------------------------ cfree ------------------------------
  4397. +*/
  4398. +
  4399. +INLINE
  4400. +#if __STD_C
  4401. +void cFREe(Void_t *mem)
  4402. +#else
  4403. +void cFREe(mem) Void_t *mem;
  4404. +#endif
  4405. +{
  4406. + fREe(mem);
  4407. +}
  4408. +
  4409. +/*
  4410. + ------------------------------ ialloc ------------------------------
  4411. + ialloc provides common support for independent_X routines, handling all of
  4412. + the combinations that can result.
  4413. +
  4414. + The opts arg has:
  4415. + bit 0 set if all elements are same size (using sizes[0])
  4416. + bit 1 set if elements should be zeroed
  4417. +*/
  4418. +
  4419. +
  4420. +INLINE
  4421. +#if __STD_C
  4422. +static Void_t** iALLOc(size_t n_elements,
  4423. + size_t* sizes,
  4424. + int opts,
  4425. + Void_t* chunks[])
  4426. +#else
  4427. +static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
  4428. +#endif
  4429. +{
  4430. + mstate av = get_malloc_state();
  4431. + INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
  4432. + INTERNAL_SIZE_T contents_size; /* total size of elements */
  4433. + INTERNAL_SIZE_T array_size; /* request size of pointer array */
  4434. + Void_t* mem; /* malloced aggregate space */
  4435. + mchunkptr p; /* corresponding chunk */
  4436. + INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
  4437. + Void_t** marray; /* either "chunks" or malloced ptr array */
  4438. + mchunkptr array_chunk; /* chunk for malloced ptr array */
  4439. + int mmx; /* to disable mmap */
  4440. + INTERNAL_SIZE_T size;
  4441. + size_t i;
  4442. +
  4443. + /* Ensure initialization/consolidation */
  4444. + if (have_fastchunks(av)) malloc_consolidate(av);
  4445. +
  4446. + /* compute array length, if needed */
  4447. + if (chunks != 0) {
  4448. + if (n_elements == 0)
  4449. + return chunks; /* nothing to do */
  4450. + marray = chunks;
  4451. + array_size = 0;
  4452. + }
  4453. + else {
  4454. + /* if empty req, must still return chunk representing empty array */
  4455. + if (n_elements == 0)
  4456. + return (Void_t**) mALLOc(0);
  4457. + marray = 0;
  4458. + array_size = request2size(n_elements * (sizeof(Void_t*)));
  4459. + }
  4460. +
  4461. + /* compute total element size */
  4462. + if (opts & 0x1) { /* all-same-size */
  4463. + element_size = request2size(*sizes);
  4464. + contents_size = n_elements * element_size;
  4465. + }
  4466. + else { /* add up all the sizes */
  4467. + element_size = 0;
  4468. + contents_size = 0;
  4469. + for (i = 0; i != n_elements; ++i)
  4470. + contents_size += request2size(sizes[i]);
  4471. + }
  4472. +
  4473. + /* subtract out alignment bytes from total to minimize overallocation */
  4474. + size = contents_size + array_size - MALLOC_ALIGN_MASK;
  4475. +
  4476. + /*
  4477. + Allocate the aggregate chunk.
  4478. + But first disable mmap so malloc won't use it, since
  4479. + we would not be able to later free/realloc space internal
  4480. + to a segregated mmap region.
  4481. + */
  4482. + mmx = av->n_mmaps_max; /* disable mmap */
  4483. + av->n_mmaps_max = 0;
  4484. + mem = mALLOc(size);
  4485. + av->n_mmaps_max = mmx; /* reset mmap */
  4486. + if (mem == 0)
  4487. + return 0;
  4488. +
  4489. + p = mem2chunk(mem);
  4490. + assert(!chunk_is_mmapped(p));
  4491. + remainder_size = chunksize(p);
  4492. +
  4493. + if (opts & 0x2) { /* optionally clear the elements */
  4494. + MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
  4495. + }
  4496. +
  4497. + /* If not provided, allocate the pointer array as final part of chunk */
  4498. + if (marray == 0) {
  4499. + array_chunk = chunk_at_offset(p, contents_size);
  4500. + marray = (Void_t**) (chunk2mem(array_chunk));
  4501. + set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
  4502. + remainder_size = contents_size;
  4503. + }
  4504. +
  4505. + /* split out elements */
  4506. + for (i = 0; ; ++i) {
  4507. + marray[i] = chunk2mem(p);
  4508. + if (i != n_elements-1) {
  4509. + if (element_size != 0)
  4510. + size = element_size;
  4511. + else
  4512. + size = request2size(sizes[i]);
  4513. + remainder_size -= size;
  4514. + set_head(p, size | PREV_INUSE);
  4515. + p = chunk_at_offset(p, size);
  4516. + }
  4517. + else { /* the final element absorbs any overallocation slop */
  4518. + set_head(p, remainder_size | PREV_INUSE);
  4519. + break;
  4520. + }
  4521. + }
  4522. +
  4523. +#ifdef DEBUG
  4524. + if (marray != chunks) {
  4525. + /* final element must have exactly exhausted chunk */
  4526. + if (element_size != 0)
  4527. + assert(remainder_size == element_size);
  4528. + else
  4529. + assert(remainder_size == request2size(sizes[i]));
  4530. + check_inuse_chunk(mem2chunk(marray));
  4531. + }
  4532. +
  4533. + for (i = 0; i != n_elements; ++i)
  4534. + check_inuse_chunk(mem2chunk(marray[i]));
  4535. +#endif
  4536. +
  4537. + return marray;
  4538. +}
  4539. +
  4540. +
  4541. +/*
  4542. + ------------------------- independent_calloc -------------------------
  4543. +*/
  4544. +
  4545. +INLINE
  4546. +#if __STD_C
  4547. +Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
  4548. +#else
  4549. +Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
  4550. +#endif
  4551. +{
  4552. + size_t sz = elem_size; /* serves as 1-element array */
  4553. + /* opts arg of 3 means all elements are same size, and should be cleared */
  4554. + return iALLOc(n_elements, &sz, 3, chunks);
  4555. +}
  4556. +
  4557. +/*
  4558. + ------------------------- independent_comalloc -------------------------
  4559. +*/
  4560. +
  4561. +INLINE
  4562. +#if __STD_C
  4563. +Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
  4564. +#else
  4565. +Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
  4566. +#endif
  4567. +{
  4568. + return iALLOc(n_elements, sizes, 0, chunks);
  4569. +}
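The iALLOc/iCALLOc/iCOMALLOc trio above carves many small blocks out of one aggregate chunk. A usage sketch follows; it assumes the public name independent_calloc() is mapped onto iCALLOc() elsewhere in this file, as in stock dlmalloc, and passes a caller-supplied pointer array so nothing beyond the elements themselves needs freeing.

    #include <stddef.h>

    struct node { int value; struct node *next; };

    /* public wrapper assumed to exist, as in stock dlmalloc */
    extern void **independent_calloc(size_t n_elements, size_t elem_size, void *chunks[]);

    static struct node *make_list8(void)
    {
        void *slots[8];
        size_t i;

        /* 8 zeroed node-sized blocks, all carved from a single aggregate chunk */
        if (independent_calloc(8, sizeof(struct node), slots) == NULL)
            return NULL;

        for (i = 0; i + 1 < 8; ++i)                /* chain them together */
            ((struct node *)slots[i])->next = (struct node *)slots[i + 1];

        return (struct node *)slots[0];   /* each node remains individually free()-able */
    }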
  4570. +
  4571. +
  4572. +/*
  4573. + ------------------------------ valloc ------------------------------
  4574. +*/
  4575. +
  4576. +INLINE
  4577. +#if __STD_C
  4578. +Void_t* vALLOc(size_t bytes)
  4579. +#else
  4580. +Void_t* vALLOc(bytes) size_t bytes;
  4581. +#endif
  4582. +{
  4583. + /* Ensure initialization/consolidation */
  4584. + mstate av = get_malloc_state();
  4585. + if (have_fastchunks(av)) malloc_consolidate(av);
  4586. + return mEMALIGn(av->pagesize, bytes);
  4587. +}
  4588. +
  4589. +/*
  4590. + ------------------------------ pvalloc ------------------------------
  4591. +*/
  4592. +
  4593. +
  4594. +#if __STD_C
  4595. +Void_t* pVALLOc(size_t bytes)
  4596. +#else
  4597. +Void_t* pVALLOc(bytes) size_t bytes;
  4598. +#endif
  4599. +{
  4600. + mstate av = get_malloc_state();
  4601. + size_t pagesz;
  4602. +
  4603. + /* Ensure initialization/consolidation */
  4604. + if (have_fastchunks(av)) malloc_consolidate(av);
  4605. + pagesz = av->pagesize;
  4606. + return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
  4607. +}
  4608. +
  4609. +
  4610. +/*
  4611. + ------------------------------ malloc_trim ------------------------------
  4612. +*/
  4613. +
  4614. +INLINE
  4615. +#if __STD_C
  4616. +int mTRIm(size_t pad)
  4617. +#else
  4618. +int mTRIm(pad) size_t pad;
  4619. +#endif
  4620. +{
  4621. + mstate av = get_malloc_state();
  4622. + /* Ensure initialization/consolidation */
  4623. + malloc_consolidate(av);
  4624. +
  4625. +#ifndef MORECORE_CANNOT_TRIM
  4626. + return sYSTRIm(pad, av);
  4627. +#else
  4628. + return 0;
  4629. +#endif
  4630. +}
  4631. +
  4632. +
  4633. +/*
  4634. + ------------------------- malloc_usable_size -------------------------
  4635. +*/
  4636. +
  4637. +INLINE
  4638. +#if __STD_C
  4639. +size_t mUSABLe(Void_t* mem)
  4640. +#else
  4641. +size_t mUSABLe(mem) Void_t* mem;
  4642. +#endif
  4643. +{
  4644. + mchunkptr p;
  4645. + if (mem != 0) {
  4646. + p = mem2chunk(mem);
  4647. + if (chunk_is_mmapped(p))
  4648. + return chunksize(p) - 2*SIZE_SZ;
  4649. + else if (inuse(p))
  4650. + return chunksize(p) - SIZE_SZ;
  4651. + }
  4652. + return 0;
  4653. +}
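mUSABLe() above reports the usable payload of a block, which can exceed the requested size because requests are padded and rounded up to chunk sizes. A small usage sketch; the public name malloc_usable_size() and its prototype are assumed here (the actual mapping depends on the wrapper macros defined earlier in the file).

    #include <stdio.h>
    #include <stdlib.h>

    /* prototype assumed; mapping to mUSABLe() depends on the wrapper macros */
    extern size_t malloc_usable_size(void *mem);

    int main(void)
    {
        char *p = malloc(100);
        if (p != NULL) {
            /* typically prints a value >= 100 because of chunk rounding */
            printf("requested 100, usable %lu\n", (unsigned long)malloc_usable_size(p));
            free(p);
        }
        return 0;
    }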
  4654. +
  4655. +/*
  4656. + ------------------------------ mallinfo ------------------------------
  4657. +*/
  4658. +
  4659. +struct mallinfo mALLINFo()
  4660. +{
  4661. + mstate av = get_malloc_state();
  4662. + struct mallinfo mi;
  4663. + unsigned int i;
  4664. + mbinptr b;
  4665. + mchunkptr p;
  4666. + INTERNAL_SIZE_T avail;
  4667. + INTERNAL_SIZE_T fastavail;
  4668. + int nblocks;
  4669. + int nfastblocks;
  4670. +
  4671. + /* Ensure initialization */
  4672. + if (av->top == 0) malloc_consolidate(av);
  4673. +
  4674. + check_malloc_state();
  4675. +
  4676. + /* Account for top */
  4677. + avail = chunksize(av->top);
  4678. + nblocks = 1; /* top always exists */
  4679. +
  4680. + /* traverse fastbins */
  4681. + nfastblocks = 0;
  4682. + fastavail = 0;
  4683. +
  4684. + for (i = 0; i < NFASTBINS; ++i) {
  4685. + for (p = av->fastbins[i]; p != 0; p = p->fd) {
  4686. + ++nfastblocks;
  4687. + fastavail += chunksize(p);
  4688. + }
  4689. + }
  4690. +
  4691. + avail += fastavail;
  4692. +
  4693. + /* traverse regular bins */
  4694. + for (i = 1; i < NBINS; ++i) {
  4695. + b = bin_at(av, i);
  4696. + for (p = last(b); p != b; p = p->bk) {
  4697. + ++nblocks;
  4698. + avail += chunksize(p);
  4699. + }
  4700. + }
  4701. +
  4702. + mi.smblks = nfastblocks;
  4703. + mi.ordblks = nblocks;
  4704. + mi.fordblks = avail;
  4705. + mi.uordblks = av->sbrked_mem - avail;
  4706. + mi.arena = av->sbrked_mem;
  4707. + mi.hblks = av->n_mmaps;
  4708. + mi.hblkhd = av->mmapped_mem;
  4709. + mi.fsmblks = fastavail;
  4710. + mi.keepcost = chunksize(av->top);
  4711. + mi.usmblks = av->max_total_mem;
  4712. + return mi;
  4713. +}
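A short sketch of reading the statistics that mALLINFo() fills in above, assuming the public name mallinfo() maps onto it; the field interpretations follow the assignments directly above, and <malloc.h> is assumed to declare struct mallinfo and the wrapper.

    #include <stdio.h>
    #include <malloc.h>   /* assumed to declare struct mallinfo and mallinfo() */

    static void print_heap_stats(void)
    {
        struct mallinfo mi = mallinfo();

        printf("sbrked arena bytes       : %lu\n", (unsigned long)mi.arena);
        printf("in-use bytes             : %lu\n", (unsigned long)mi.uordblks);
        printf("free bytes (bins + top)  : %lu\n", (unsigned long)mi.fordblks);
        printf("free fastbin bytes       : %lu\n", (unsigned long)mi.fsmblks);
        printf("mmapped bytes            : %lu\n", (unsigned long)mi.hblkhd);
        printf("trimmable top-chunk bytes: %lu\n", (unsigned long)mi.keepcost);
    }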
  4714. +
  4715. +/*
  4716. + ------------------------------ malloc_stats ------------------------------
  4717. +*/
  4718. +
  4719. +void mSTATs()
  4720. +{
  4721. + struct mallinfo mi = mALLINFo();
  4722. +
  4723. +#ifdef WIN32
  4724. + {
  4725. + unsigned long free, reserved, committed;
  4726. + vminfo (&free, &reserved, &committed);
  4727. + fprintf(stderr, "free bytes = %10lu\n",
  4728. + free);
  4729. + fprintf(stderr, "reserved bytes = %10lu\n",
  4730. + reserved);
  4731. + fprintf(stderr, "committed bytes = %10lu\n",
  4732. + committed);
  4733. + }
  4734. +#endif
  4735. +
  4736. +
  4737. + fprintf(stderr, "max system bytes = %10lu\n",
  4738. + (unsigned long)(mi.usmblks));
  4739. + fprintf(stderr, "system bytes = %10lu\n",
  4740. + (unsigned long)(mi.arena + mi.hblkhd));
  4741. + fprintf(stderr, "in use bytes = %10lu\n",
  4742. + (unsigned long)(mi.uordblks + mi.hblkhd));
  4743. +
  4744. +
  4745. +#ifdef WIN32
  4746. + {
  4747. + unsigned long kernel, user;
  4748. + if (cpuinfo (TRUE, &kernel, &user)) {
  4749. + fprintf(stderr, "kernel ms = %10lu\n",
  4750. + kernel);
  4751. + fprintf(stderr, "user ms = %10lu\n",
  4752. + user);
  4753. + }
  4754. + }
  4755. +#endif
  4756. +}
  4757. +
  4758. +
  4759. +/*
  4760. + ------------------------------ mallopt ------------------------------
  4761. +*/
  4762. +
  4763. +INLINE
  4764. +#if __STD_C
  4765. +int mALLOPt(int param_number, int value)
  4766. +#else
  4767. +int mALLOPt(param_number, value) int param_number; int value;
  4768. +#endif
  4769. +{
  4770. + mstate av = get_malloc_state();
  4771. + /* Ensure initialization/consolidation */
  4772. + malloc_consolidate(av);
  4773. +
  4774. + switch(param_number) {
  4775. + case M_MXFAST:
  4776. + if (value >= 0 && value <= MAX_FAST_SIZE) {
  4777. + set_max_fast(av, value);
  4778. + return 1;
  4779. + }
  4780. + else
  4781. + return 0;
  4782. +
  4783. + case M_TRIM_THRESHOLD:
  4784. + av->trim_threshold = value;
  4785. + return 1;
  4786. +
  4787. + case M_TOP_PAD:
  4788. + av->top_pad = value;
  4789. + return 1;
  4790. +
  4791. + case M_MMAP_THRESHOLD:
  4792. + av->mmap_threshold = value;
  4793. + return 1;
  4794. +
  4795. + case M_MMAP_MAX:
  4796. +#if !HAVE_MMAP
  4797. + if (value != 0)
  4798. + return 0;
  4799. +#endif
  4800. + av->n_mmaps_max = value;
  4801. + return 1;
  4802. +
  4803. + default:
  4804. + return 0;
  4805. + }
  4806. +}
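A usage sketch for the tuning parameters handled above, assuming the public name mallopt() and the M_* constants defined earlier in this file; the values are arbitrary examples, not recommendations.

    #include <malloc.h>   /* assumed to declare mallopt() and the M_* constants */

    static void tune_allocator(void)
    {
        mallopt(M_MXFAST, 64);                  /* serve requests <= 64 bytes from fastbins */
        mallopt(M_TRIM_THRESHOLD, 256 * 1024);  /* trim top when > 256 KB is unused */
        mallopt(M_TOP_PAD, 64 * 1024);          /* extra padding requested on each sbrk */
        mallopt(M_MMAP_THRESHOLD, 512 * 1024);  /* use mmap for requests >= 512 KB */
        mallopt(M_MMAP_MAX, 1024);              /* cap on simultaneous mmapped regions */
    }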
  4807. +
  4808. +
  4809. +/*
  4810. + -------------------- Alternative MORECORE functions --------------------
  4811. +*/
  4812. +
  4813. +
  4814. +/*
  4815. + General Requirements for MORECORE.
  4816. +
  4817. + The MORECORE function must have the following properties:
  4818. +
  4819. + If MORECORE_CONTIGUOUS is false:
  4820. +
  4821. + * MORECORE must allocate in multiples of pagesize. It will
  4822. + only be called with arguments that are multiples of pagesize.
  4823. +
  4824. + * MORECORE(0) must return an address that is at least
  4825. + MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
  4826. +
  4827. + else (i.e. If MORECORE_CONTIGUOUS is true):
  4828. +
  4829. + * Consecutive calls to MORECORE with positive arguments
  4830. + return increasing addresses, indicating that space has been
  4831. + contiguously extended.
  4832. +
  4833. + * MORECORE need not allocate in multiples of pagesize.
  4834. + Calls to MORECORE need not have args of multiples of pagesize.
  4835. +
  4836. + * MORECORE need not page-align.
  4837. +
  4838. + In either case:
  4839. +
  4840. + * MORECORE may allocate more memory than requested. (Or even less,
  4841. + but this will generally result in a malloc failure.)
  4842. +
  4843. + * MORECORE must not allocate memory when given argument zero, but
  4844. + instead return one past the end address of memory from previous
  4845. + nonzero call. This malloc does NOT call MORECORE(0)
  4846. + until at least one call with positive arguments is made, so
  4847. + the initial value returned is not important.
  4848. +
  4849. + * Even though consecutive calls to MORECORE need not return contiguous
  4850. + addresses, it must be OK for malloc'ed chunks to span multiple
  4851. + regions in those cases where they do happen to be contiguous.
  4852. +
  4853. + * MORECORE need not handle negative arguments -- it may instead
  4854. + just return MORECORE_FAILURE when given negative arguments.
  4855. + Negative arguments are always multiples of pagesize. MORECORE
  4856. + must not misinterpret negative args as large positive unsigned
  4857. + args. You can suppress all such calls from even occurring by defining
  4858. + MORECORE_CANNOT_TRIM.
  4859. +
  4860. + There is some variation across systems about the type of the
  4861. + argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
  4862. + actually be size_t, because sbrk supports negative args, so it is
  4863. + normally the signed type of the same width as size_t (sometimes
  4864. + declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
  4865. + matter though. Internally, we use "long" as arguments, which should
  4866. + work across all reasonable possibilities.
  4867. +
  4868. + Additionally, if MORECORE ever returns failure for a positive
  4869. + request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  4870. + system allocator. This is a useful backup strategy for systems with
  4871. + holes in address spaces -- in this case sbrk cannot contiguously
  4872. + expand the heap, but mmap may be able to map noncontiguous space.
  4873. +
  4874. + If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  4875. + a function that always returns MORECORE_FAILURE.
  4876. +
  4877. + If you are using this malloc with something other than sbrk (or its
  4878. + emulation) to supply memory regions, you probably want to set
  4879. + MORECORE_CONTIGUOUS as false. As an example, here is a custom
  4880. + allocator kindly contributed for pre-OSX macOS. It uses virtually
  4881. + but not necessarily physically contiguous non-paged memory (locked
  4882. + in, present and won't get swapped out). You can use it by
  4883. + uncommenting this section, adding some #includes, and setting up the
  4884. + appropriate defines above:
  4885. +
  4886. + #define MORECORE osMoreCore
  4887. + #define MORECORE_CONTIGUOUS 0
  4888. +
  4889. + There is also a shutdown routine that should somehow be called for
  4890. + cleanup upon program exit.
  4891. +
  4892. + #define MAX_POOL_ENTRIES 100
  4893. + #define MINIMUM_MORECORE_SIZE (64 * 1024)
  4894. + static int next_os_pool;
  4895. + void *our_os_pools[MAX_POOL_ENTRIES];
  4896. +
  4897. + void *osMoreCore(int size)
  4898. + {
  4899. + void *ptr = 0;
  4900. + static void *sbrk_top = 0;
  4901. +
  4902. + if (size > 0)
  4903. + {
  4904. + if (size < MINIMUM_MORECORE_SIZE)
  4905. + size = MINIMUM_MORECORE_SIZE;
  4906. + if (CurrentExecutionLevel() == kTaskLevel)
  4907. + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  4908. + if (ptr == 0)
  4909. + {
  4910. + return (void *) MORECORE_FAILURE;
  4911. + }
  4912. + // save ptrs so they can be freed during cleanup
  4913. + our_os_pools[next_os_pool] = ptr;
  4914. + next_os_pool++;
  4915. + ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  4916. + sbrk_top = (char *) ptr + size;
  4917. + return ptr;
  4918. + }
  4919. + else if (size < 0)
  4920. + {
  4921. + // we don't currently support shrink behavior
  4922. + return (void *) MORECORE_FAILURE;
  4923. + }
  4924. + else
  4925. + {
  4926. + return sbrk_top;
  4927. + }
  4928. + }
  4929. +
  4930. + // cleanup any allocated memory pools
  4931. + // called as last thing before shutting down driver
  4932. +
  4933. + void osCleanupMem(void)
  4934. + {
  4935. + void **ptr;
  4936. +
  4937. + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  4938. + if (*ptr)
  4939. + {
  4940. + PoolDeallocate(*ptr);
  4941. + *ptr = 0;
  4942. + }
  4943. + }
  4944. +
  4945. +*/
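As a counterpart to the osMoreCore example above, here is a minimal sketch of a MORECORE built over a fixed static arena that follows the stated contract: MORECORE(0) returns one past the end of previously returned memory, positive requests grow contiguously, and shrink requests simply fail (pair it with MORECORE_CANNOT_TRIM). The arena size and the name arena_morecore are assumptions for illustration, not part of the patch.

    /*
      #define MORECORE            arena_morecore
      #define MORECORE_CONTIGUOUS 1
      #define MORECORE_CANNOT_TRIM
    */
    #include <stddef.h>

    #ifndef MORECORE_FAILURE
    #define MORECORE_FAILURE (-1)          /* matches the allocator's default */
    #endif

    #define ARENA_SIZE (1024 * 1024)       /* illustrative fixed arena */

    static char   arena[ARENA_SIZE];
    static size_t arena_used;

    void *arena_morecore(long size)
    {
        void *p;

        if (size == 0)                               /* report the current break */
            return arena + arena_used;
        if (size < 0)                                /* no shrink support */
            return (void *)MORECORE_FAILURE;
        if ((size_t)size > ARENA_SIZE - arena_used)  /* arena exhausted */
            return (void *)MORECORE_FAILURE;

        p = arena + arena_used;
        arena_used += (size_t)size;
        return p;
    }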
  4946. +
  4947. +
  4948. +/*
  4949. + --------------------------------------------------------------
  4950. +
  4951. + Emulation of sbrk for win32.
  4952. + Donated by J. Walter <Walter@GeNeSys-e.de>.
  4953. + For additional information about this code, and malloc on Win32, see
  4954. + http://www.genesys-e.de/jwalter/
  4955. +*/
  4956. +
  4957. +
  4958. +#ifdef WIN32
  4959. +
  4960. +#ifdef _DEBUG
  4961. +/* #define TRACE */
  4962. +#endif
  4963. +
  4964. +/* Support for USE_MALLOC_LOCK */
  4965. +#ifdef USE_MALLOC_LOCK
  4966. +
  4967. +/* Wait for spin lock */
  4968. +static int slwait (int *sl) {
  4969. + while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
  4970. + Sleep (0);
  4971. + return 0;
  4972. +}
  4973. +
  4974. +/* Release spin lock */
  4975. +static int slrelease (int *sl) {
  4976. + InterlockedExchange (sl, 0);
  4977. + return 0;
  4978. +}
  4979. +
  4980. +#ifdef NEEDED
  4981. +/* Spin lock for emulation code */
  4982. +static int g_sl;
  4983. +#endif
  4984. +
  4985. +#endif /* USE_MALLOC_LOCK */
  4986. +
  4987. +/* getpagesize for windows */
  4988. +static long getpagesize (void) {
  4989. + static long g_pagesize = 0;
  4990. + if (! g_pagesize) {
  4991. + SYSTEM_INFO system_info;
  4992. + GetSystemInfo (&system_info);
  4993. + g_pagesize = system_info.dwPageSize;
  4994. + }
  4995. + return g_pagesize;
  4996. +}
  4997. +static long getregionsize (void) {
  4998. + static long g_regionsize = 0;
  4999. + if (! g_regionsize) {
  5000. + SYSTEM_INFO system_info;
  5001. + GetSystemInfo (&system_info);
  5002. + g_regionsize = system_info.dwAllocationGranularity;
  5003. + }
  5004. + return g_regionsize;
  5005. +}
  5006. +
  5007. +/* A region list entry */
  5008. +typedef struct _region_list_entry {
  5009. + void *top_allocated;
  5010. + void *top_committed;
  5011. + void *top_reserved;
  5012. + long reserve_size;
  5013. + struct _region_list_entry *previous;
  5014. +} region_list_entry;
  5015. +
  5016. +/* Allocate and link a region entry in the region list */
  5017. +static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
  5018. + region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
  5019. + if (! next)
  5020. + return FALSE;
  5021. + next->top_allocated = (char *) base_reserved;
  5022. + next->top_committed = (char *) base_reserved;
  5023. + next->top_reserved = (char *) base_reserved + reserve_size;
  5024. + next->reserve_size = reserve_size;
  5025. + next->previous = *last;
  5026. + *last = next;
  5027. + return TRUE;
  5028. +}
  5029. +/* Free and unlink the last region entry from the region list */
  5030. +static int region_list_remove (region_list_entry **last) {
  5031. + region_list_entry *previous = (*last)->previous;
  5032. + if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
  5033. + return FALSE;
  5034. + *last = previous;
  5035. + return TRUE;
  5036. +}
  5037. +
  5038. +#define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
  5039. +#define FLOOR(size,to) ((size)&~((to)-1))
  5040. +
  5041. +#define SBRK_SCALE 0
  5042. +/* #define SBRK_SCALE 1 */
  5043. +/* #define SBRK_SCALE 2 */
  5044. +/* #define SBRK_SCALE 4 */
  5045. +
  5046. +/* sbrk for windows */
  5047. +static void *sbrk (long size) {
  5048. + static long g_pagesize, g_my_pagesize;
  5049. + static long g_regionsize, g_my_regionsize;
  5050. + static region_list_entry *g_last;
  5051. + void *result = (void *) MORECORE_FAILURE;
  5052. +#ifdef TRACE
  5053. + printf ("sbrk %d\n", size);
  5054. +#endif
  5055. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5056. + /* Wait for spin lock */
  5057. + slwait (&g_sl);
  5058. +#endif
  5059. + /* First time initialization */
  5060. + if (! g_pagesize) {
  5061. + g_pagesize = getpagesize ();
  5062. + g_my_pagesize = g_pagesize << SBRK_SCALE;
  5063. + }
  5064. + if (! g_regionsize) {
  5065. + g_regionsize = getregionsize ();
  5066. + g_my_regionsize = g_regionsize << SBRK_SCALE;
  5067. + }
  5068. + if (! g_last) {
  5069. + if (! region_list_append (&g_last, 0, 0))
  5070. + goto sbrk_exit;
  5071. + }
  5072. + /* Assert invariants */
  5073. + assert (g_last);
  5074. + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
  5075. + g_last->top_allocated <= g_last->top_committed);
  5076. + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
  5077. + g_last->top_committed <= g_last->top_reserved &&
  5078. + (unsigned) g_last->top_committed % g_pagesize == 0);
  5079. + assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
  5080. + assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
  5081. + /* Allocation requested? */
  5082. + if (size >= 0) {
  5083. + /* Allocation size is the requested size */
  5084. + long allocate_size = size;
  5085. + /* Compute the size to commit */
  5086. + long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5087. + /* Do we reach the commit limit? */
  5088. + if (to_commit > 0) {
  5089. + /* Round size to commit */
  5090. + long commit_size = CEIL (to_commit, g_my_pagesize);
  5091. + /* Compute the size to reserve */
  5092. + long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
  5093. + /* Do we reach the reserve limit? */
  5094. + if (to_reserve > 0) {
  5095. + /* Compute the remaining size to commit in the current region */
  5096. + long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
  5097. + if (remaining_commit_size > 0) {
  5098. + /* Assert preconditions */
  5099. + assert ((unsigned) g_last->top_committed % g_pagesize == 0);
  5100. + assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
  5101. + /* Commit this */
  5102. + void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
  5103. + MEM_COMMIT, PAGE_READWRITE);
  5104. + /* Check returned pointer for consistency */
  5105. + if (base_committed != g_last->top_committed)
  5106. + goto sbrk_exit;
  5107. + /* Assert postconditions */
  5108. + assert ((unsigned) base_committed % g_pagesize == 0);
  5109. +#ifdef TRACE
  5110. + printf ("Commit %p %d\n", base_committed, remaining_commit_size);
  5111. +#endif
  5112. + /* Adjust the region's commit top */
  5113. + g_last->top_committed = (char *) base_committed + remaining_commit_size;
  5114. + }
  5115. + } {
  5116. + /* Now we are going to search and reserve. */
  5117. + int contiguous = -1;
  5118. + int found = FALSE;
  5119. + MEMORY_BASIC_INFORMATION memory_info;
  5120. + void *base_reserved;
  5121. + long reserve_size;
  5122. + do {
  5123. + /* Assume contiguous memory */
  5124. + contiguous = TRUE;
  5125. + /* Round size to reserve */
  5126. + reserve_size = CEIL (to_reserve, g_my_regionsize);
  5127. + /* Start with the current region's top */
  5128. + memory_info.BaseAddress = g_last->top_reserved;
  5129. + /* Assert preconditions */
  5130. + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5131. + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5132. + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
  5133. + /* Assert postconditions */
  5134. + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5135. +#ifdef TRACE
  5136. + printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
  5137. + memory_info.State == MEM_FREE ? "FREE":
  5138. + (memory_info.State == MEM_RESERVE ? "RESERVED":
  5139. + (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
  5140. +#endif
  5141. + /* Region is free, well aligned and big enough: we are done */
  5142. + if (memory_info.State == MEM_FREE &&
  5143. + (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
  5144. + memory_info.RegionSize >= (unsigned) reserve_size) {
  5145. + found = TRUE;
  5146. + break;
  5147. + }
  5148. + /* From now on we can't get contiguous memory! */
  5149. + contiguous = FALSE;
  5150. + /* Recompute size to reserve */
  5151. + reserve_size = CEIL (allocate_size, g_my_regionsize);
  5152. + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
  5153. + /* Assert preconditions */
  5154. + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5155. + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5156. + }
  5157. + /* Search failed? */
  5158. + if (! found)
  5159. + goto sbrk_exit;
  5160. + /* Assert preconditions */
  5161. + assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
  5162. + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5163. + /* Try to reserve this */
  5164. + base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
  5165. + MEM_RESERVE, PAGE_NOACCESS);
  5166. + if (! base_reserved) {
  5167. + int rc = GetLastError ();
  5168. + if (rc != ERROR_INVALID_ADDRESS)
  5169. + goto sbrk_exit;
  5170. + }
  5171. + /* A null pointer signals (hopefully) a race condition with another thread. */
  5172. + /* In this case, we try again. */
  5173. + } while (! base_reserved);
  5174. + /* Check returned pointer for consistency */
  5175. + if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
  5176. + goto sbrk_exit;
  5177. + /* Assert postconditions */
  5178. + assert ((unsigned) base_reserved % g_regionsize == 0);
  5179. +#ifdef TRACE
  5180. + printf ("Reserve %p %d\n", base_reserved, reserve_size);
  5181. +#endif
  5182. + /* Did we get contiguous memory? */
  5183. + if (contiguous) {
  5184. + long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
  5185. + /* Adjust allocation size */
  5186. + allocate_size -= start_size;
  5187. + /* Adjust the region's allocation top */
  5188. + g_last->top_allocated = g_last->top_committed;
  5189. + /* Recompute the size to commit */
  5190. + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5191. + /* Round size to commit */
  5192. + commit_size = CEIL (to_commit, g_my_pagesize);
  5193. + }
  5194. + /* Append the new region to the list */
  5195. + if (! region_list_append (&g_last, base_reserved, reserve_size))
  5196. + goto sbrk_exit;
  5197. + /* Didn't we get contiguous memory? */
  5198. + if (! contiguous) {
  5199. + /* Recompute the size to commit */
  5200. + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5201. + /* Round size to commit */
  5202. + commit_size = CEIL (to_commit, g_my_pagesize);
  5203. + }
  5204. + }
  5205. + }
  5206. + /* Assert preconditions */
  5207. + assert ((unsigned) g_last->top_committed % g_pagesize == 0);
  5208. + assert (0 < commit_size && commit_size % g_pagesize == 0); {
  5209. + /* Commit this */
  5210. + void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
  5211. + MEM_COMMIT, PAGE_READWRITE);
  5212. + /* Check returned pointer for consistency */
  5213. + if (base_committed != g_last->top_committed)
  5214. + goto sbrk_exit;
  5215. + /* Assert postconditions */
  5216. + assert ((unsigned) base_committed % g_pagesize == 0);
  5217. +#ifdef TRACE
  5218. + printf ("Commit %p %d\n", base_committed, commit_size);
  5219. +#endif
  5220. + /* Adjust the region's commit top */
  5221. + g_last->top_committed = (char *) base_committed + commit_size;
  5222. + }
  5223. + }
  5224. + /* Adjust the region's allocation top */
  5225. + g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
  5226. + result = (char *) g_last->top_allocated - size;
  5227. + /* Deallocation requested? */
  5228. + } else if (size < 0) {
  5229. + long deallocate_size = - size;
  5230. + /* As long as we have a region to release */
  5231. + while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
  5232. + /* Get the size to release */
  5233. + long release_size = g_last->reserve_size;
  5234. + /* Get the base address */
  5235. + void *base_reserved = (char *) g_last->top_reserved - release_size;
  5236. + /* Assert preconditions */
  5237. + assert ((unsigned) base_reserved % g_regionsize == 0);
  5238. + assert (0 < release_size && release_size % g_regionsize == 0); {
  5239. + /* Release this */
  5240. + int rc = VirtualFree (base_reserved, 0,
  5241. + MEM_RELEASE);
  5242. + /* Check returned code for consistency */
  5243. + if (! rc)
  5244. + goto sbrk_exit;
  5245. +#ifdef TRACE
  5246. + printf ("Release %p %d\n", base_reserved, release_size);
  5247. +#endif
  5248. + }
  5249. + /* Adjust deallocation size */
  5250. + deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
  5251. + /* Remove the old region from the list */
  5252. + if (! region_list_remove (&g_last))
  5253. + goto sbrk_exit;
  5254. + } {
  5255. + /* Compute the size to decommit */
  5256. + long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
  5257. + if (to_decommit >= g_my_pagesize) {
  5258. + /* Compute the size to decommit */
  5259. + long decommit_size = FLOOR (to_decommit, g_my_pagesize);
  5260. + /* Compute the base address */
  5261. + void *base_committed = (char *) g_last->top_committed - decommit_size;
  5262. + /* Assert preconditions */
  5263. + assert ((unsigned) base_committed % g_pagesize == 0);
  5264. + assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
  5265. + /* Decommit this */
  5266. + int rc = VirtualFree ((char *) base_committed, decommit_size,
  5267. + MEM_DECOMMIT);
  5268. + /* Check returned code for consistency */
  5269. + if (! rc)
  5270. + goto sbrk_exit;
  5271. +#ifdef TRACE
  5272. + printf ("Decommit %p %d\n", base_committed, decommit_size);
  5273. +#endif
  5274. + }
  5275. + /* Adjust deallocation size and the region's commit and allocation tops */
  5276. + deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
  5277. + g_last->top_committed = base_committed;
  5278. + g_last->top_allocated = base_committed;
  5279. + }
  5280. + }
  5281. + /* Adjust the region's allocation top */
  5282. + g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
  5283. + /* Check for underflow */
  5284. + if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
  5285. + g_last->top_allocated > g_last->top_committed) {
  5286. + /* Adjust the region's allocation top */
  5287. + g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
  5288. + goto sbrk_exit;
  5289. + }
  5290. + result = g_last->top_allocated;
  5291. + }
  5292. + /* Assert invariants */
  5293. + assert (g_last);
  5294. + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
  5295. + g_last->top_allocated <= g_last->top_committed);
  5296. + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
  5297. + g_last->top_committed <= g_last->top_reserved &&
  5298. + (unsigned) g_last->top_committed % g_pagesize == 0);
  5299. + assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
  5300. + assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
  5301. +
  5302. +sbrk_exit:
  5303. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5304. + /* Release spin lock */
  5305. + slrelease (&g_sl);
  5306. +#endif
  5307. + return result;
  5308. +}
  5309. +
  5310. +/* mmap for windows */
  5311. +static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
  5312. + static long g_pagesize;
  5313. + static long g_regionsize;
  5314. +#ifdef TRACE
  5315. + printf ("mmap %d\n", size);
  5316. +#endif
  5317. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5318. + /* Wait for spin lock */
  5319. + slwait (&g_sl);
  5320. +#endif
  5321. + /* First time initialization */
  5322. + if (! g_pagesize)
  5323. + g_pagesize = getpagesize ();
  5324. + if (! g_regionsize)
  5325. + g_regionsize = getregionsize ();
  5326. + /* Assert preconditions */
  5327. + assert ((unsigned) ptr % g_regionsize == 0);
  5328. + assert (size % g_pagesize == 0);
  5329. + /* Allocate this */
  5330. + ptr = VirtualAlloc (ptr, size,
  5331. + MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
  5332. + if (! ptr) {
  5333. + ptr = (void *) MORECORE_FAILURE;
  5334. + goto mmap_exit;
  5335. + }
  5336. + /* Assert postconditions */
  5337. + assert ((unsigned) ptr % g_regionsize == 0);
  5338. +#ifdef TRACE
  5339. + printf ("Commit %p %d\n", ptr, size);
  5340. +#endif
  5341. +mmap_exit:
  5342. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5343. + /* Release spin lock */
  5344. + slrelease (&g_sl);
  5345. +#endif
  5346. + return ptr;
  5347. +}
  5348. +
  5349. +/* munmap for windows */
  5350. +static long munmap (void *ptr, long size) {
  5351. + static long g_pagesize;
  5352. + static long g_regionsize;
  5353. + int rc = MUNMAP_FAILURE;
  5354. +#ifdef TRACE
  5355. + printf ("munmap %p %d\n", ptr, size);
  5356. +#endif
  5357. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5358. + /* Wait for spin lock */
  5359. + slwait (&g_sl);
  5360. +#endif
  5361. + /* First time initialization */
  5362. + if (! g_pagesize)
  5363. + g_pagesize = getpagesize ();
  5364. + if (! g_regionsize)
  5365. + g_regionsize = getregionsize ();
  5366. + /* Assert preconditions */
  5367. + assert ((unsigned) ptr % g_regionsize == 0);
  5368. + assert (size % g_pagesize == 0);
  5369. + /* Free this */
  5370. + if (! VirtualFree (ptr, 0,
  5371. + MEM_RELEASE))
  5372. + goto munmap_exit;
  5373. + rc = 0;
  5374. +#ifdef TRACE
  5375. + printf ("Release %p %d\n", ptr, size);
  5376. +#endif
  5377. +munmap_exit:
  5378. +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5379. + /* Release spin lock */
  5380. + slrelease (&g_sl);
  5381. +#endif
  5382. + return rc;
  5383. +}
  5384. +
  5385. +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed) {
  5386. + MEMORY_BASIC_INFORMATION memory_info;
  5387. + memory_info.BaseAddress = 0;
  5388. + *free = *reserved = *committed = 0;
  5389. + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
  5390. + switch (memory_info.State) {
  5391. + case MEM_FREE:
  5392. + *free += memory_info.RegionSize;
  5393. + break;
  5394. + case MEM_RESERVE:
  5395. + *reserved += memory_info.RegionSize;
  5396. + break;
  5397. + case MEM_COMMIT:
  5398. + *committed += memory_info.RegionSize;
  5399. + break;
  5400. + }
  5401. + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
  5402. + }
  5403. +}
  5404. +
  5405. +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user) {
  5406. + if (whole) {
  5407. + __int64 creation64, exit64, kernel64, user64;
  5408. + int rc = GetProcessTimes (GetCurrentProcess (),
  5409. + (FILETIME *) &creation64,
  5410. + (FILETIME *) &exit64,
  5411. + (FILETIME *) &kernel64,
  5412. + (FILETIME *) &user64);
  5413. + if (! rc) {
  5414. + *kernel = 0;
  5415. + *user = 0;
  5416. + return FALSE;
  5417. + }
  5418. + *kernel = (unsigned long) (kernel64 / 10000);
  5419. + *user = (unsigned long) (user64 / 10000);
  5420. + return TRUE;
  5421. + } else {
  5422. + __int64 creation64, exit64, kernel64, user64;
  5423. + int rc = GetThreadTimes (GetCurrentThread (),
  5424. + (FILETIME *) &creation64,
  5425. + (FILETIME *) &exit64,
  5426. + (FILETIME *) &kernel64,
  5427. + (FILETIME *) &user64);
  5428. + if (! rc) {
  5429. + *kernel = 0;
  5430. + *user = 0;
  5431. + return FALSE;
  5432. + }
  5433. + *kernel = (unsigned long) (kernel64 / 10000);
  5434. + *user = (unsigned long) (user64 / 10000);
  5435. + return TRUE;
  5436. + }
  5437. +}
  5438. +
  5439. +#endif /* WIN32 */
  5440. +
  5441. +/* ------------------------------------------------------------
  5442. +History:
  5443. +
  5444. + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
  5445. + * Introduce independent_comalloc and independent_calloc.
  5446. + Thanks to Michael Pachos for motivation and help.
  5447. + * Make optional .h file available
  5448. + * Allow > 2GB requests on 32bit systems.
  5449. + * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  5450. + Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  5451. + and Anonymous.
  5452. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  5453. + helping test this.)
  5454. + * memalign: check alignment arg
  5455. + * realloc: don't try to shift chunks backwards, since this
  5456. + leads to more fragmentation in some programs and doesn't
  5457. + seem to help in any others.
  5458. + * Collect all cases in malloc requiring system memory into sYSMALLOc
  5459. + * Use mmap as backup to sbrk
  5460. + * Place all internal state in malloc_state
  5461. + * Introduce fastbins (although similar to 2.5.1)
  5462. + * Many minor tunings and cosmetic improvements
  5463. + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5464. + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5465. + Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5466. + * Include errno.h to support default failure action.
  5467. +
  5468. + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
  5469. + * return null for negative arguments
  5470. + * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5471. + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5472. + (e.g. WIN32 platforms)
  5473. + * Cleanup header file inclusion for WIN32 platforms
  5474. + * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5475. + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5476. + memory allocation routines
  5477. + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5478. + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5479. + usage of 'assert' in non-WIN32 code
  5480. + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5481. + avoid infinite loop
  5482. + * Always call 'fREe()' rather than 'free()'
  5483. +
  5484. + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
  5485. + * Fixed ordering problem with boundary-stamping
  5486. +
  5487. + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
  5488. + * Added pvalloc, as recommended by H.J. Liu
  5489. + * Added 64bit pointer support mainly from Wolfram Gloger
  5490. + * Added anonymously donated WIN32 sbrk emulation
  5491. + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5492. + * malloc_extend_top: fix mask error that caused wastage after
  5493. + foreign sbrks
  5494. + * Add linux mremap support code from HJ Liu
  5495. +
  5496. + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
  5497. + * Integrated most documentation with the code.
  5498. + * Add support for mmap, with help from
  5499. + Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5500. + * Use last_remainder in more cases.
  5501. + * Pack bins using idea from colin@nyx10.cs.du.edu
  5502. + * Use ordered bins instead of best-fit threshold
  5503. + * Eliminate block-local decls to simplify tracing and debugging.
  5504. + * Support another case of realloc via move into top
  5505. + * Fix error occurring when initial sbrk_base not word-aligned.
  5506. + * Rely on page size for units instead of SBRK_UNIT to
  5507. + avoid surprises about sbrk alignment conventions.
  5508. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5509. + (raymond@es.ele.tue.nl) for the suggestion.
  5510. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5511. + * More precautions for cases where other routines call sbrk,
  5512. + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5513. + * Added macros etc., allowing use in linux libc from
  5514. + H.J. Lu (hjl@gnu.ai.mit.edu)
  5515. + * Inverted this history list
  5516. +
  5517. + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
  5518. + * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5519. + * Removed all preallocation code since under current scheme
  5520. + the work required to undo bad preallocations exceeds
  5521. + the work saved in good cases for most test programs.
  5522. + * No longer use return list or unconsolidated bins since
  5523. + no scheme using them consistently outperforms those that don't
  5524. + given above changes.
  5525. + * Use best fit for very large chunks to prevent some worst-cases.
  5526. + * Added some support for debugging
  5527. +
  5528. + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
  5529. + * Removed footers when chunks are in use. Thanks to
  5530. + Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5531. +
  5532. + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
  5533. + * Added malloc_trim, with help from Wolfram Gloger
  5534. + (wmglo@Dent.MED.Uni-Muenchen.DE).
  5535. +
  5536. + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
  5537. +
  5538. + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
  5539. + * realloc: try to expand in both directions
  5540. + * malloc: swap order of clean-bin strategy;
  5541. + * realloc: only conditionally expand backwards
  5542. + * Try not to scavenge used bins
  5543. + * Use bin counts as a guide to preallocation
  5544. + * Occasionally bin return list chunks in first scan
  5545. + * Add a few optimizations from colin@nyx10.cs.du.edu
  5546. +
  5547. + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
  5548. + * faster bin computation & slightly different binning
  5549. + * merged all consolidations to one part of malloc proper
  5550. + (eliminating old malloc_find_space & malloc_clean_bin)
  5551. + * Scan 2 returns chunks (not just 1)
  5552. + * Propagate failure in realloc if malloc returns 0
  5553. + * Add stuff to allow compilation on non-ANSI compilers
  5554. + from kpv@research.att.com
  5555. +
  5556. + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
  5557. + * removed potential for odd address access in prev_chunk
  5558. + * removed dependency on getpagesize.h
  5559. + * misc cosmetics and a bit more internal documentation
  5560. + * anticosmetics: mangled names in macros to evade debugger strangeness
  5561. + * tested on sparc, hp-700, dec-mips, rs6000
  5562. + with gcc & native cc (hp, dec only) allowing
  5563. + Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5564. +
  5565. + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
  5566. + * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5567. + structure of old version, but most details differ.)
  5568. +
  5569. +*/
  5570. +
  5571. +#ifdef USE_PUBLIC_MALLOC_WRAPPERS
  5572. +
  5573. +#ifndef KDE_MALLOC_FULL
  5574. +
  5575. +#ifdef KDE_MALLOC_GLIBC
  5576. +#include "glibc.h"
  5577. +#else
  5578. +/* cannot use dlsym(RTLD_NEXT,...) here, it calls malloc()*/
  5579. +#error Unknown libc
  5580. +#endif
  5581. +
  5582. +/* 0 - uninitialized
  5583. + 1 - this malloc
  5584. + 2 - standard libc malloc*/
  5585. +extern char* getenv(const char*);
  5586. +static int malloc_type = 0;
  5587. +static void init_malloc_type(void)
  5588. + {
  5589. + const char* const env = getenv( "KDE_MALLOC" );
  5590. + if( env == NULL )
  5591. + malloc_type = 1;
  5592. + else if( env[ 0 ] == '0' || env[ 0 ] == 'n' || env[ 0 ] == 'N' )
  5593. + malloc_type = 2;
  5594. + else
  5595. + malloc_type = 1;
  5596. + }
  5597. +
  5598. +#endif
  5599. +
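
Aside (illustration, not patch content): every public_* wrapper that follows uses the same three-way dispatch. malloc_type starts at 0; the first allocation calls init_malloc_type(), which reads the KDE_MALLOC environment variable. Unset, or any value not starting with '0', 'n' or 'N', selects this allocator (type 1); otherwise the wrappers forward to the libc_* entry points declared in glibc.h (type 2). Because the variable is read only once, it must be set before the process performs its first allocation. Stripped of the MALLOC_PREACTION/MALLOC_POSTACTION locking, the pattern is:

    /* Sketch of the dispatch shared by the wrappers below (locking omitted).
       "example_public_malloc" is illustrative, not a symbol from the patch. */
    void* example_public_malloc (size_t bytes) {
        if (malloc_type == 1)
            return mALLOc (bytes);             /* this allocator */
        if (malloc_type == 2)
            return libc_malloc (bytes);        /* system libc */
        init_malloc_type ();                   /* first call: consult $KDE_MALLOC */
        return example_public_malloc (bytes);  /* retry with the chosen backend */
    }
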
  5600. +Void_t* public_mALLOc(size_t bytes) {
  5601. +#ifndef KDE_MALLOC_FULL
  5602. + if( malloc_type == 1 )
  5603. + {
  5604. +#endif
  5605. + Void_t* m;
  5606. + if (MALLOC_PREACTION != 0) {
  5607. + return 0;
  5608. + }
  5609. + m = mALLOc(bytes);
  5610. + if (MALLOC_POSTACTION != 0) {
  5611. + }
  5612. + return m;
  5613. +#ifndef KDE_MALLOC_FULL
  5614. + }
  5615. + if( malloc_type == 2 )
  5616. + return libc_malloc( bytes );
  5617. + init_malloc_type();
  5618. + return public_mALLOc( bytes );
  5619. +#endif
  5620. +}
  5621. +
  5622. +void public_fREe(Void_t* m) {
  5623. +#ifndef KDE_MALLOC_FULL
  5624. + if( malloc_type == 1 )
  5625. + {
  5626. +#endif
  5627. + if (MALLOC_PREACTION != 0) {
  5628. + return;
  5629. + }
  5630. + fREe(m);
  5631. + if (MALLOC_POSTACTION != 0) {
  5632. + }
  5633. +#ifndef KDE_MALLOC_FULL
  5634. + return;
  5635. + }
  5636. + if( malloc_type == 2 )
  5637. + {
  5638. + libc_free( m );
  5639. + return;
  5640. + }
  5641. + init_malloc_type();
  5642. + public_fREe( m );
  5643. +#endif
  5644. +}
  5645. +
  5646. +Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
  5647. +#ifndef KDE_MALLOC_FULL
  5648. + if( malloc_type == 1 )
  5649. + {
  5650. +#endif
  5651. + if (MALLOC_PREACTION != 0) {
  5652. + return 0;
  5653. + }
  5654. + m = rEALLOc(m, bytes);
  5655. + if (MALLOC_POSTACTION != 0) {
  5656. + }
  5657. + return m;
  5658. +#ifndef KDE_MALLOC_FULL
  5659. + }
  5660. + if( malloc_type == 2 )
  5661. + return libc_realloc( m, bytes );
  5662. + init_malloc_type();
  5663. + return public_rEALLOc( m, bytes );
  5664. +#endif
  5665. +}
  5666. +
  5667. +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
  5668. +#ifndef KDE_MALLOC_FULL
  5669. + if( malloc_type == 1 )
  5670. + {
  5671. +#endif
  5672. + Void_t* m;
  5673. + if (MALLOC_PREACTION != 0) {
  5674. + return 0;
  5675. + }
  5676. + m = mEMALIGn(alignment, bytes);
  5677. + if (MALLOC_POSTACTION != 0) {
  5678. + }
  5679. + return m;
  5680. +#ifndef KDE_MALLOC_FULL
  5681. + }
  5682. + if( malloc_type == 2 )
  5683. + return libc_memalign( alignment, bytes );
  5684. + init_malloc_type();
  5685. + return public_mEMALIGn( alignment, bytes );
  5686. +#endif
  5687. +}
  5688. +
  5689. +Void_t* public_vALLOc(size_t bytes) {
  5690. +#ifndef KDE_MALLOC_FULL
  5691. + if( malloc_type == 1 )
  5692. + {
  5693. +#endif
  5694. + Void_t* m;
  5695. + if (MALLOC_PREACTION != 0) {
  5696. + return 0;
  5697. + }
  5698. + m = vALLOc(bytes);
  5699. + if (MALLOC_POSTACTION != 0) {
  5700. + }
  5701. + return m;
  5702. +#ifndef KDE_MALLOC_FULL
  5703. + }
  5704. + if( malloc_type == 2 )
  5705. + return libc_valloc( bytes );
  5706. + init_malloc_type();
  5707. + return public_vALLOc( bytes );
  5708. +#endif
  5709. +}
  5710. +
  5711. +Void_t* public_pVALLOc(size_t bytes) {
  5712. +#ifndef KDE_MALLOC_FULL
  5713. + if( malloc_type == 1 )
  5714. + {
  5715. +#endif
  5716. + Void_t* m;
  5717. + if (MALLOC_PREACTION != 0) {
  5718. + return 0;
  5719. + }
  5720. + m = pVALLOc(bytes);
  5721. + if (MALLOC_POSTACTION != 0) {
  5722. + }
  5723. + return m;
  5724. +#ifndef KDE_MALLOC_FULL
  5725. + }
  5726. + if( malloc_type == 2 )
  5727. + return libc_pvalloc( bytes );
  5728. + init_malloc_type();
  5729. + return public_pVALLOc( bytes );
  5730. +#endif
  5731. +}
  5732. +
  5733. +Void_t* public_cALLOc(size_t n, size_t elem_size) {
  5734. +#ifndef KDE_MALLOC_FULL
  5735. + if( malloc_type == 1 )
  5736. + {
  5737. +#endif
  5738. + Void_t* m;
  5739. + if (MALLOC_PREACTION != 0) {
  5740. + return 0;
  5741. + }
  5742. + m = cALLOc(n, elem_size);
  5743. + if (MALLOC_POSTACTION != 0) {
  5744. + }
  5745. + return m;
  5746. +#ifndef KDE_MALLOC_FULL
  5747. + }
  5748. + if( malloc_type == 2 )
  5749. + return libc_calloc( n, elem_size );
  5750. + init_malloc_type();
  5751. + return public_cALLOc( n, elem_size );
  5752. +#endif
  5753. +}
  5754. +
  5755. +void public_cFREe(Void_t* m) {
  5756. +#ifndef KDE_MALLOC_FULL
  5757. + if( malloc_type == 1 )
  5758. + {
  5759. +#endif
  5760. + if (MALLOC_PREACTION != 0) {
  5761. + return;
  5762. + }
  5763. + cFREe(m);
  5764. + if (MALLOC_POSTACTION != 0) {
  5765. + }
  5766. +#ifndef KDE_MALLOC_FULL
  5767. + return;
  5768. + }
  5769. + if( malloc_type == 2 )
  5770. + {
  5771. + libc_cfree( m );
  5772. + return;
  5773. + }
  5774. + init_malloc_type();
  5775. + public_cFREe( m );
  5776. +#endif
  5777. +}
  5778. +
  5779. +struct mallinfo public_mALLINFo() {
  5780. +#ifndef KDE_MALLOC_FULL
  5781. + if( malloc_type == 1 )
  5782. + {
  5783. +#endif
  5784. + struct mallinfo m;
  5785. + if (MALLOC_PREACTION != 0) {
  5786. + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  5787. + return nm;
  5788. + }
  5789. + m = mALLINFo();
  5790. + if (MALLOC_POSTACTION != 0) {
  5791. + }
  5792. + return m;
  5793. +#ifndef KDE_MALLOC_FULL
  5794. + }
  5795. + if( malloc_type == 2 )
  5796. + return libc_mallinfo();
  5797. + init_malloc_type();
  5798. + return public_mALLINFo();
  5799. +#endif
  5800. +}
  5801. +
  5802. +int public_mALLOPt(int p, int v) {
  5803. +#ifndef KDE_MALLOC_FULL
  5804. + if( malloc_type == 1 )
  5805. + {
  5806. +#endif
  5807. + int result;
  5808. + if (MALLOC_PREACTION != 0) {
  5809. + return 0;
  5810. + }
  5811. + result = mALLOPt(p, v);
  5812. + if (MALLOC_POSTACTION != 0) {
  5813. + }
  5814. + return result;
  5815. +#ifndef KDE_MALLOC_FULL
  5816. + }
  5817. + if( malloc_type == 2 )
  5818. + return libc_mallopt( p, v );
  5819. + init_malloc_type();
  5820. + return public_mALLOPt( p, v );
  5821. +#endif
  5822. +}
  5823. +#endif
  5824. +
  5825. +int
  5826. +posix_memalign (void **memptr, size_t alignment, size_t size)
  5827. +{
  5828. + void *mem;
  5829. +
  5830. + /* Test whether the ALIGNMENT argument is valid. It must be a power of
  5831. + two multiple of sizeof (void *). */
  5832. + if (alignment % sizeof (void *) != 0 || (alignment & (alignment - 1)) != 0)
  5833. + return EINVAL;
  5834. +
  5835. + mem = memalign (alignment, size);
  5836. +
  5837. + if (mem != NULL) {
  5838. + *memptr = mem;
  5839. + return 0;
  5840. + }
  5841. +
  5842. + return ENOMEM;
  5843. +}
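
Aside (illustration, not patch content): posix_memalign() follows the POSIX contract: the alignment must be a power of two that is also a multiple of sizeof(void *), the result is stored through memptr only on success, and the return value is 0, EINVAL or ENOMEM rather than errno being set. A typical call looks like:

    /* Usage sketch: a 4096-byte buffer aligned to a 64-byte boundary. */
    void *buf = NULL;
    if (posix_memalign (&buf, 64, 4096) == 0) {
        /* ... use buf ... */
        free (buf);   /* released with this allocator's free() */
    }
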
  5844. +
  5845. +#else
  5846. +/* Some linkers (Solaris 2.6) don't like empty archives, but for
  5847. + easier Makefiles we want to link against libklmalloc.la every time,
  5848. + so simply make it non-empty. */
  5849. +void kde_malloc_dummy_function ()
  5850. +{
  5851. + return;
  5852. +}
  5853. +#endif
  5854. diff -Nupr a/src/corelib/arch/avr32/qatomic.cpp b/src/corelib/arch/avr32/qatomic.cpp
  5855. --- a/src/corelib/arch/avr32/qatomic.cpp 1970-01-01 01:00:00.000000000 +0100
  5856. +++ b/src/corelib/arch/avr32/qatomic.cpp 2006-07-26 11:02:43.000000000 +0200
  5857. @@ -0,0 +1,24 @@
  5858. +/****************************************************************************
  5859. +**
  5860. +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
  5861. +**
  5862. +** This file is part of the QtCore module of the Qt Toolkit.
  5863. +**
  5864. +** Licensees holding valid Qt Preview licenses may use this file in
  5865. +** accordance with the Qt Preview License Agreement provided with the
  5866. +** Software.
  5867. +**
  5868. +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
  5869. +** information about Qt Commercial License Agreements.
  5870. +**
  5871. +** Contact info@trolltech.com if any conditions of this licensing are
  5872. +** not clear to you.
  5873. +**
  5874. +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
  5875. +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  5876. +**
  5877. +****************************************************************************/
  5878. +
  5879. +#include "QtCore/qatomic_avr32.h"
  5880. +
  5881. +Q_CORE_EXPORT long q_atomic_lock = 0;
  5882. diff -Nupr a/src/corelib/arch/qatomic_arch.h b/src/corelib/arch/qatomic_arch.h
  5883. --- a/src/corelib/arch/qatomic_arch.h 2006-06-30 09:49:44.000000000 +0200
  5884. +++ b/src/corelib/arch/qatomic_arch.h 2006-07-27 12:42:58.000000000 +0200
  5885. @@ -32,6 +32,8 @@ QT_BEGIN_HEADER
  5886. # include "QtCore/qatomic_alpha.h"
  5887. #elif defined(QT_ARCH_ARM)
  5888. # include "QtCore/qatomic_arm.h"
  5889. +#elif defined(QT_ARCH_AVR32)
  5890. +# include "QtCore/qatomic_avr32.h"
  5891. #elif defined(QT_ARCH_BOUNDSCHECKER)
  5892. # include "QtCore/qatomic_boundschecker.h"
  5893. #elif defined(QT_ARCH_GENERIC)
  5894. diff -Nupr a/src/corelib/arch/qatomic_avr32.h b/src/corelib/arch/qatomic_avr32.h
  5895. --- a/src/corelib/arch/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
  5896. +++ b/src/corelib/arch/qatomic_avr32.h 2006-07-28 10:30:08.000000000 +0200
  5897. @@ -0,0 +1,113 @@
  5898. +/****************************************************************************
  5899. +**
  5900. +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
  5901. +**
  5902. +** This file is part of the QtCore module of the Qt Toolkit.
  5903. +**
  5904. +** Licensees holding valid Qt Preview licenses may use this file in
  5905. +** accordance with the Qt Preview License Agreement provided with the
  5906. +** Software.
  5907. +**
  5908. +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
  5909. +** information about Qt Commercial License Agreements.
  5910. +**
  5911. +** Contact info@trolltech.com if any conditions of this licensing are
  5912. +** not clear to you.
  5913. +**
  5914. +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
  5915. +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  5916. +**
  5917. +****************************************************************************/
  5918. +
  5919. +#ifndef AVR32_QATOMIC_H
  5920. +#define AVR32_QATOMIC_H
  5921. +
  5922. +#include <QtCore/qglobal.h>
  5923. +
  5924. +QT_BEGIN_HEADER
  5925. +
  5926. +extern Q_CORE_EXPORT long q_atomic_lock;
  5927. +
  5928. +inline long q_atomic_swp(volatile long *ptr, long newval)
  5929. +{
  5930. + register int ret;
  5931. + asm volatile("xchg %0,%1,%2"
  5932. + : "=&r"(ret)
  5933. + : "r"(ptr), "r"(newval)
  5934. + : "memory", "cc");
  5935. + return ret;
  5936. +}
  5937. +
  5938. +inline int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval)
  5939. +{
  5940. + int ret = 0;
  5941. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0);
  5942. + if (*ptr == expected) {
  5943. + *ptr = newval;
  5944. + ret = 1;
  5945. + }
  5946. + q_atomic_swp(&q_atomic_lock, 0);
  5947. + return ret;
  5948. +}
  5949. +
  5950. +inline int q_atomic_test_and_set_acquire_int(volatile int *ptr, int expected, int newval)
  5951. +{
  5952. + return q_atomic_test_and_set_int(ptr, expected, newval);
  5953. +}
  5954. +
  5955. +inline int q_atomic_test_and_set_release_int(volatile int *ptr, int expected, int newval)
  5956. +{
  5957. + return q_atomic_test_and_set_int(ptr, expected, newval);
  5958. +}
  5959. +
  5960. +inline int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval)
  5961. +{
  5962. + int ret = 0;
  5963. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
  5964. + if (*reinterpret_cast<void * volatile *>(ptr) == expected) {
  5965. + *reinterpret_cast<void * volatile *>(ptr) = newval;
  5966. + ret = 1;
  5967. + }
  5968. + q_atomic_swp(&q_atomic_lock, 0);
  5969. + return ret;
  5970. +}
  5971. +
  5972. +inline int q_atomic_increment(volatile int *ptr)
  5973. +{
  5974. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
  5975. + int originalValue = *ptr;
  5976. + *ptr = originalValue + 1;
  5977. + q_atomic_swp(&q_atomic_lock, 0);
  5978. + return originalValue != -1;
  5979. +}
  5980. +
  5981. +inline int q_atomic_decrement(volatile int *ptr)
  5982. +{
  5983. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
  5984. + int originalValue = *ptr;
  5985. + *ptr = originalValue - 1;
  5986. + q_atomic_swp(&q_atomic_lock, 0);
  5987. + return originalValue != 1;
  5988. +}
  5989. +
  5990. +inline int q_atomic_set_int(volatile int *ptr, int newval)
  5991. +{
  5992. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
  5993. + int originalValue = *ptr;
  5994. + *ptr = newval;
  5995. + q_atomic_swp(&q_atomic_lock, 0);
  5996. + return originalValue;
  5997. +}
  5998. +
  5999. +inline void *q_atomic_set_ptr(volatile void *ptr, void *newval)
  6000. +{
  6001. + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
  6002. + void *originalValue = *reinterpret_cast<void * volatile *>(ptr);
  6003. + *reinterpret_cast<void * volatile *>(ptr) = newval;
  6004. + q_atomic_swp(&q_atomic_lock, 0);
  6005. + return originalValue;
  6006. +}
  6007. +
  6008. +QT_END_HEADER
  6009. +
  6010. +#endif // AVR32_QATOMIC_H
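
Aside (illustration, not patch content): AVR32 has no native compare-and-swap here, so every primitive in this header serializes on the global q_atomic_lock, using the xchg instruction as a spin lock. The property Qt's reference counting relies on is the return value: q_atomic_increment() and q_atomic_decrement() return non-zero as long as the new counter value is non-zero. A minimal, hypothetical use of the primitives (Qt's QBasicAtomic classes wrap them; this snippet is not part of the patch):

    // Reference-count sketch built directly on the primitives above.
    struct RefCounted {
        volatile int refs;                     // set to 1 by the creating owner
    };
    inline void ref (RefCounted *o)   { q_atomic_increment (&o->refs); }
    inline bool deref (RefCounted *o) { return q_atomic_decrement (&o->refs) != 0; }
    // deref() returning false means the count reached zero and the object may be deleted.
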
  6011. diff -Nupr a/src/corelib/io/qfilesystemwatcher_inotify.cpp b/src/corelib/io/qfilesystemwatcher_inotify.cpp
  6012. --- a/src/corelib/io/qfilesystemwatcher_inotify.cpp 2006-06-30 09:49:45.000000000 +0200
  6013. +++ b/src/corelib/io/qfilesystemwatcher_inotify.cpp 2006-07-27 13:24:27.000000000 +0200
  6014. @@ -72,6 +72,10 @@
  6015. # define __NR_inotify_init 316
  6016. # define __NR_inotify_add_watch 317
  6017. # define __NR_inotify_rm_watch 318
  6018. +#elif defined (__avr32__)
  6019. +# define __NR_inotify_init 240
  6020. +# define __NR_inotify_add_watch 241
  6021. +# define __NR_inotify_rm_watch 242
  6022. #elif defined (__SH4__)
  6023. # define __NR_inotify_init 290
  6024. # define __NR_inotify_add_watch 291
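
Aside (illustration, not patch content): qfilesystemwatcher_inotify.cpp invokes inotify through raw syscall numbers because the libc of the day shipped no wrappers, so each architecture needs its own __NR_inotify_* values; the hunk above supplies the AVR32 set. Wrappers of the following shape are what these defines feed (the my_* names are illustrative, not the identifiers used in the Qt source):

    // Assumes syscall() from <unistd.h>/<sys/syscall.h> and the __NR_* values above.
    static inline int my_inotify_init ()
    { return syscall (__NR_inotify_init); }
    static inline int my_inotify_add_watch (int fd, const char *path, unsigned mask)
    { return syscall (__NR_inotify_add_watch, fd, path, mask); }
    static inline int my_inotify_rm_watch (int fd, int wd)
    { return syscall (__NR_inotify_rm_watch, fd, wd); }
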
  6025. diff -uprN a/mkspecs/qws/linux-avr32-g++/qmake.conf b/mkspecs/qws/linux-avr32-g++/qmake.conf
  6026. --- a/mkspecs/qws/linux-avr32-g++/qmake.conf 1970-01-01 01:00:00.000000000 +0100
  6027. +++ b/mkspecs/qws/linux-avr32-g++/qmake.conf 2006-08-01 08:47:12.000000000 +0200
  6028. @@ -0,0 +1,85 @@
  6029. +#
  6030. +# qmake configuration for linux-g++ using the avr32-linux-g++ cross-compiler
  6031. +#
  6032. +
  6033. +MAKEFILE_GENERATOR = UNIX
  6034. +TEMPLATE = app
  6035. +CONFIG += qt warn_on release link_prl
  6036. +QT += core gui network
  6037. +QMAKE_INCREMENTAL_STYLE = sublib
  6038. +
  6039. +QMAKE_CC = avr32-linux-gcc
  6040. +QMAKE_LEX = flex
  6041. +QMAKE_LEXFLAGS =
  6042. +QMAKE_YACC = yacc
  6043. +QMAKE_YACCFLAGS = -d
  6044. +QMAKE_CFLAGS = -pipe
  6045. +QMAKE_CFLAGS_WARN_ON = -Wall -W
  6046. +QMAKE_CFLAGS_WARN_OFF =
  6047. +QMAKE_CFLAGS_RELEASE = -O2
  6048. +QMAKE_CFLAGS_DEBUG = -g -O2
  6049. +QMAKE_CFLAGS_SHLIB = -fPIC
  6050. +QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses
  6051. +QMAKE_CFLAGS_THREAD = -D_REENTRANT
  6052. +QMAKE_CFLAGS_HIDESYMS = -fvisibility=hidden
  6053. +
  6054. +QMAKE_CXX = avr32-linux-g++
  6055. +QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -fno-exceptions
  6056. +QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON
  6057. +QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF
  6058. +QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE
  6059. +QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG
  6060. +QMAKE_CXXFLAGS_SHLIB = $$QMAKE_CFLAGS_SHLIB
  6061. +QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC
  6062. +QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD
  6063. +QMAKE_CXXFLAGS_HIDESYMS = $$QMAKE_CFLAGS_HIDESYMS -fvisibility-inlines-hidden
  6064. +
  6065. +QMAKE_INCDIR =
  6066. +QMAKE_LIBDIR =
  6067. +QMAKE_INCDIR_X11 =
  6068. +QMAKE_LIBDIR_X11 =
  6069. +QMAKE_INCDIR_QT = $$[QT_INSTALL_HEADERS]
  6070. +QMAKE_LIBDIR_QT = $$[QT_INSTALL_LIBS]
  6071. +QMAKE_INCDIR_OPENGL =
  6072. +QMAKE_LIBDIR_OPENGL =
  6073. +QMAKE_INCDIR_QTOPIA = $(QPEDIR)/include
  6074. +QMAKE_LIBDIR_QTOPIA = $(QPEDIR)/lib
  6075. +
  6076. +QMAKE_LINK = avr32-linux-g++
  6077. +QMAKE_LINK_SHLIB = avr32-linux-g++
  6078. +QMAKE_LFLAGS =
  6079. +QMAKE_LFLAGS_RELEASE =
  6080. +QMAKE_LFLAGS_DEBUG =
  6081. +QMAKE_LFLAGS_SHLIB = -shared
  6082. +QMAKE_LFLAGS_PLUGIN = $$QMAKE_LFLAGS_SHLIB
  6083. +QMAKE_LFLAGS_SONAME = -Wl,-soname,
  6084. +QMAKE_LFLAGS_THREAD =
  6085. +QMAKE_RPATH = -Wl,-rpath,
  6086. +
  6087. +QMAKE_LIBS =
  6088. +QMAKE_LIBS_DYNLOAD = -ldl
  6089. +QMAKE_LIBS_X11 =
  6090. +QMAKE_LIBS_X11SM =
  6091. +QMAKE_LIBS_QT = -lqte
  6092. +QMAKE_LIBS_QT_THREAD = -lqte-mt
  6093. +QMAKE_LIBS_QT_OPENGL = -lqgl
  6094. +QMAKE_LIBS_QTOPIA = -lqpe -lqtopia
  6095. +QMAKE_LIBS_THREAD = -lpthread
  6096. +
  6097. +QMAKE_MOC = $$[QT_INSTALL_BINS]/moc
  6098. +QMAKE_UIC = $$[QT_INSTALL_BINS]/uic
  6099. +
  6100. +QMAKE_AR = avr32-linux-ar cqs
  6101. +QMAKE_RANLIB = avr32-linux-ranlib
  6102. +
  6103. +QMAKE_TAR = tar -cf
  6104. +QMAKE_GZIP = gzip -9f
  6105. +
  6106. +QMAKE_COPY = cp -f
  6107. +QMAKE_MOVE = mv -f
  6108. +QMAKE_DEL_FILE = rm -f
  6109. +QMAKE_DEL_DIR = rmdir
  6110. +QMAKE_STRIP = avr32-linux-strip
  6111. +QMAKE_CHK_DIR_EXISTS = test -d
  6112. +QMAKE_MKDIR = mkdir -p
  6113. +load(qt_config)
  6114. diff -uprN a/mkspecs/qws/linux-avr32-g++/qplatformdefs.h b/mkspecs/qws/linux-avr32-g++/qplatformdefs.h
  6115. --- a/mkspecs/qws/linux-avr32-g++/qplatformdefs.h 1970-01-01 01:00:00.000000000 +0100
  6116. +++ b/mkspecs/qws/linux-avr32-g++/qplatformdefs.h 2006-07-26 09:16:52.000000000 +0200
  6117. @@ -0,0 +1,22 @@
  6118. +/****************************************************************************
  6119. +**
  6120. +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
  6121. +**
  6122. +** This file is part of the qmake spec of the Qt Toolkit.
  6123. +**
  6124. +** Licensees holding valid Qt Preview licenses may use this file in
  6125. +** accordance with the Qt Preview License Agreement provided with the
  6126. +** Software.
  6127. +**
  6128. +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
  6129. +** information about Qt Commercial License Agreements.
  6130. +**
  6131. +** Contact info@trolltech.com if any conditions of this licensing are
  6132. +** not clear to you.
  6133. +**
  6134. +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
  6135. +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  6136. +**
  6137. +****************************************************************************/
  6138. +
  6139. +#include "../../linux-g++/qplatformdefs.h"