; ; flex.s ; ; Flexible memory handling (MDW) ; ; © 1994-1998 Straylight ; ;----- Licensing note ------------------------------------------------------- ; ; This file is part of Straylight's flex. ; ; Flex is free software; you can redistribute it and/or modify ; it under the terms of the GNU General Public License as published by ; the Free Software Foundation; either version 2, or (at your option) ; any later version. ; ; Flex is distributed in the hope that it will be useful, ; but WITHOUT ANY WARRANTY; without even the implied warranty of ; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ; GNU General Public License for more details. ; ; You should have received a copy of the GNU General Public License ; along with Flex. If not, write to the Free Software Foundation, ; 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ;----- New unified version -------------------------------------------------- ; ; I'm finally fed up of maintaining four different versions of this code. ; From now on, there is only this one. 
;
; Lots of options are supported:
;
; OPT_APCS        Generate an APCS-compatible version
; OPT_SAPPHIRE    Generate a Sapphire-compatible version
; OPT_STEEL       Apply some STEEL-specific eccentricities
; OPT_STANDALONE  Build a standalone assembler version (default)
; OPT_DUMP        Generate flex_dump code
; OPT_DYNAREA     Generate dynamic-area handling code
; OPT_STACK       Generate relocation stack handling code
; OPT_DLL         Generate absolute address relocation for DLL code
; OPT_ATEXIT      Register cleanup function with `atexit' for DLL code
;
; [mdw]

;----- Set up some options --------------------------------------------------

	; --- DCLOPT ---
	;
	; Declare a build option.  If $var was predefined (e.g. on the
	; assembler command line) force it {TRUE}; otherwise create it
	; as a global logical initialised {FALSE}.

	MACRO
	DCLOPT	$var
 [ :DEF:$var
$var	SETL	{TRUE}
 |
	GBLL	$var
$var	SETL	{FALSE}
 ]
	MEND

	DCLOPT	OPT_APCS
	DCLOPT	OPT_SAPPHIRE
	DCLOPT	OPT_STEEL
	DCLOPT	OPT_STANDALONE
	DCLOPT	OPT_DUMP
	DCLOPT	OPT_DYNAREA
	DCLOPT	OPT_STACK
	DCLOPT	OPT_DLL
	DCLOPT	OPT_ATEXIT

	; If no target flavour was requested, default to standalone.
	; NOTE(review): OPT_STANDALONE was already GBLLed by DCLOPT above;
	; re-issuing GBLL here relies on the assembler tolerating a
	; re-declaration -- confirm against the ObjAsm version in use.
 [ :LNOT:OPT_APCS:LAND::LNOT:OPT_SAPPHIRE:LAND::LNOT:OPT_STANDALONE
	GBLL	OPT_STANDALONE
OPT_STANDALONE SETL {TRUE}
 ]

;----- Standard stuff -------------------------------------------------------

	GET	libs:header
	GET	libs:swis

;----- External dependencies ------------------------------------------------

 [ OPT_SAPPHIRE
	GET	sapphire:fastMove
	GET	sapphire:event
	GET	sapphire:except
	GET	sapphire:libOpts
	GET	sapphire:roVersion
	GET	sapphire:sapphire
 |
	IMPORT	fastMove
 ]

 [ OPT_DLL:LAND:OPT_APCS
	IMPORT	atexit
 ]

;----- Workspace macros -----------------------------------------------------

 [ OPT_APCS

	; --- WSPACE ---
	;
	; Load a pointer to flex's static workspace into $reg (default
	; R12).  $addr names a word holding the workspace address.
	; Under OPT_DLL the value is relocated by the library's static
	; base, read from [R10,#-536].
	; NOTE(review): the Sapphire flavour presumably gets WSPACE from
	; the sapphire: headers included above -- confirm.

	MACRO
$label	WSPACE	$addr,$reg
	LCLS	r
 [ "$reg"=""
r	SETS	"R12"
 |
r	SETS	"$reg"
 ]
	ALIGN
$label	LDR	$r,$addr
 [ OPT_DLL
	LDR	R14,[R10,#-536]
	ADD	$r,R14,$r
 ]
	MEND

 ]

;----- Main code ------------------------------------------------------------

 [ OPT_SAPPHIRE
	AREA	|Sapphire$$Code|,CODE,READONLY
 ]

 [ OPT_APCS
	AREA	|C$$Code|,CODE,READONLY
 ]

 [ OPT_STANDALONE
	AREA	|Straylight$$Code|,CODE,READONLY
 ]

; --- flex__setslot ---
;
; On entry:	R0 == limit address of slot required, or -1 to read
;
; On exit:	R0 == actual address (after update)
;		R1 == limit requested (R0
;		on entry)
;
; Use:		Sets the application's WimpSlot to a given value.  The value
;		is given as an address, rather than as a size, which is
;		the more normal way of doing things.
;
;		Since updated to cope with dynamic areas.

flex__setslot ROUT

	STMFD	R13!,{R2-R6,R14}	;Save some registers
	LDR	R14,flex__flags		;Load interesting flags
 [ OPT_DYNAREA
	TST	R14,#fFlag__dynArea	;Using a dynamic area?
	BNE	%50flex__setslot	;Yes -- do different things
 ]

	; --- Change the WimpSlot ---
	;
	; Be careful -- we may be sharing memory space with another
	; application!  We temporarily raise the memory limit to the
	; application space size if needed, resize the slot, then
	; restore the old limit.

	MOV	R5,R0			;Look after my argument
	MOV	R0,#14			;Read app space value
	MOV	R1,#0			;Read, rather than write
	SWI	XOS_ChangeEnvironment	;Read the value
	MOV	R3,R1			;Keep hold of app space size
	MOV	R0,#0			;Now read memory limit
	MOV	R1,#0			;Read again, not write
	SWI	XOS_ChangeEnvironment	;Read memory limit
	MOV	R6,R1			;Look after memory limit
	CMP	R6,R3			;How does this shape up?
	MOVLT	R1,R3			;If too low, extend mem limit
	SWILT	XOS_ChangeEnvironment	;Set memory limit
	CMP	R5,#-1			;Does he want to read it?
	MOVEQ	R0,R5			;Yes -- do that then
	SUBNE	R0,R5,#&8000		;Otherwise work out slot size
	MOV	R1,#-1			;Not interested in next slot
	SWI	XWimp_SlotSize		;Change the WimpSlot value
	MOV	R4,R0			;Look after updated value
	CMP	R6,R3			;If we changed the mem limit
	MOVLT	R1,R6			;Put it back the way it was
	MOVLT	R0,#0			;Setting memory limit
	SWILT	XOS_ChangeEnvironment	;Restore memory limit
	ADD	R0,R4,#&8000		;New value of WimpSlot
	MOV	R1,R5			;Return requested size too
	LDMFD	R13!,{R2-R6,PC}^	;Return to caller

	; --- Change a dynamic area size ---

 [ OPT_DYNAREA
50flex__setslot
	MOV	R2,R0			;Look after address requested
	LDR	R3,flex__end		;Find the end address
	SUBS	R1,R2,R3		;Work out the difference
	LDR	R0,flex__dynArea	;Load dynamic area handle
	SWI	XOS_ChangeDynamicArea	;Try and change the size
	ADDGT	R0,R3,R1		;Get new limit address
	SUBLE	R0,R3,R1		;Irritatingly positive...
	MOV	R1,R2			;And the caller's request
	LDMFD	R13!,{R2-R6,PC}^	;Return to caller
 ]

	LTORG

; --- flex__fixup ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Goes off and fixes up all the anchors currently pointing at
;		blocks in the heap.

flex__fixup ROUT

	STMFD	R13!,{R0-R3,R14}	;Save some registers

	; --- Set up for the fixup loop ---

	LDR	R1,flex__base		;Find the base of the heap
	LDR	R2,flex__free		;Find end of the blocks

	; --- Now go through and fix things up ---

00flex__fixup
	CMP	R1,R2			;Have we reached the end yet?
	LDMGEFD	R13!,{R0-R3,PC}^	;Return to caller if so
	LDR	R3,[R1,#flex__bkanchor]	;Find the pointer to anchor
	CMP	R3,#0			;Is the block currently free?
	ADDNE	R14,R1,#flex__ohead	;No -- bump aver the overhead
	STRNE	R14,[R3]		;And store in the anchor
	LDR	R3,[R1,#flex__size]	;Find the block's size
	ADD	R3,R3,#flex__ohead+7	;Add on the extra overhead
	BIC	R3,R3,#7		;And word align the size
	ADD	R1,R1,R3		;Bump along to the next block
	B	%00flex__fixup		;And fix that one up too

	LTORG

; --- flex__compact ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Try to compact the heap by one iteration.
;
;		We troll through the blocks to find a free one and haul
;		everything down a bit.

flex__compact ROUT

	STMFD	R13!,{R0-R6,R14}	;Save some registers

	; --- Set up for a loop through the data ---

	LDR	R5,flex__base		;Find the beginning of flex
	LDR	R1,flex__free		;Find the end too

	; --- Find a free block ---

00flex__compact
	CMP	R5,R1			;Are we at the end yet?
	BGE	%10flex__compact	;Yes -- the heap is compact
	LDR	R2,[R5,#flex__bkanchor]	;Get the block's anchor addr
	CMP	R2,#0			;Is the block free?
	LDR	R2,[R5,#flex__size]	;Get the block size
	ADD	R2,R2,#flex__ohead+7	;Add on the overhead bytes
	BIC	R2,R2,#7		;And word align the size
	ADDNE	R5,R5,R2		;No -- move on to next one
	BNE	%00flex__compact	;And go round for another one

	; --- We've found a free block ---

01flex__compact
	ADD	R6,R5,R2		;Point to the next block
	CMP	R6,R1			;Is that the end of the heap?
	BGE	%05flex__compact	;Yes -- reduce the free ptr

	; --- Check for two free blocks together ---

	LDR	R0,[R6,#flex__bkanchor]	;Does this have an anchor?
	CMP	R0,#0			;Check if it's free
	BNE	%02flex__compact	;No -- start swapping blocks

	; --- Join two adjacent free blocks together ---

	LDR	R0,[R6,#flex__size]	;Yes -- get its size
	ADD	R2,R0,R2		;Concatenate the two blocks
	ADD	R2,R2,#flex__ohead+7	;Add on the overhead bytes
	BIC	R2,R2,#7		;And word align the size
	B	%01flex__compact	;And check again...

	; --- There's a block to bring down ---

02flex__compact
	LDR	R4,[R6,#flex__size]	;Get size of block to move
	ADD	R4,R4,#flex__ohead+7	;Add the flex overhead
	BIC	R4,R4,#7		;And word align the size
	MOVS	R2,R4			;This is the size to move
	MOV	R0,R5			;Where to move it to
	MOV	R1,R6			;Where it is right now
	BLNE	fastMove		;Copy it down PDQ
 [ OPT_STACK
	BLNE	flex__reloc		;Deal with the relocation
 ]
	ADD	R0,R5,R4		;Point after block we moved
	MOV	R1,#0			;Block doesn't have an anchor
	STR	R1,[R0,#flex__bkanchor]	;Store that away for later
	SUB	R1,R6,R5		;Find the difference here
	SUB	R1,R1,#flex__ohead	;Don't count this size here
	STR	R1,[R0,#flex__size]	;Store the old size in free

	; --- We need to fix up the block we moved ---

	LDR	R0,[R5,#flex__bkanchor]	;Get the anchor pointer
	ADD	R1,R5,#flex__ohead	;Point to the real data
	STR	R1,[R0]			;Store client's new anchor

	; --- That's it -- return to caller ---

	LDMFD	R13!,{R0-R6,PC}^	;Return to caller

	; --- Merge the last block with the free area ---

05flex__compact
	STR	R5,flex__free		;This is the new free area
	LDR	R0,flex__chunk		;Get the machine page size
	SUB	R0,R0,#1		;Turn into a bitmask
	ADD	R5,R5,R0		;Align this to page boundary
	BIC	R0,R5,R0		;And finish off the align
	LDR	R1,flex__end		;Find the end of the heap
	CMP	R0,R1			;Are these different?
	BLNE	flex__setslot		;Yes -- free some memory
	STRNE	R0,flex__end		;Store the end away
					;(flags survive the call: the
					;LDM ^ return restores them)

	; --- That's it -- return ---

	LDMFD	R13!,{R0-R6,PC}^	;Return to caller

	; --- There wasn't anything to do -- we're compacted ---

10flex__compact
	LDR	R0,flex__flags
	ORR	R0,R0,#fFlag__compact	;We are now compacted
	STR	R0,flex__flags
	LDMFD	R13!,{R0-R6,PC}^	;Return to caller

	LTORG

; --- flex_reduce ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Compacts the flex heap by one iteration.

	EXPORT	flex_reduce
flex_reduce ROUT

	STMFD	R13!,{R0,R12,R14}	;Stack some registers
	WSPACE	flex__wSpace		;Find my workspace

	; --- Check if it's compacted ---

	LDR	R0,flex__flags		;Get my nice flags word
	TST	R0,#fFlag__compact	;Is the heap compacted?
	BLEQ	flex__compact		;No -- compact it a bit
	LDMFD	R13!,{R0,R12,PC}^	;Return to caller now

	LTORG

; --- flex_compact ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Completely compacts the flex heap.

	EXPORT	flex_compact
flex_compact ROUT

	STMFD	R13!,{R0,R12,R14}	;Save some registers
	WSPACE	flex__wSpace		;Find my workspace

	; --- Compaction loop ---

00flex_compact
	LDR	R0,flex__flags		;Get my nice flags word
	TST	R0,#fFlag__compact	;Is the heap compacted?
	BLEQ	flex__compact		;No -- compact another stage
	BEQ	%00flex_compact		;And go round again

	; --- The end -- return ---

	LDMFD	R13!,{R0,R12,PC}^	;Return to caller

	LTORG

; --- flex_free ---
;
; On entry:	R0 == pointer to the flex anchor
;
; On exit:	--
;
; Use:		Frees a flex block allocated by flex_alloc.
EXPORT flex_free flex_free ROUT STMFD R13!,{R0,R12,R14} ;Save some registers WSPACE flex__wSpace ; --- Mark the block as being free --- LDR R14,[R0] ;Get pointer to actual block MOV R0,#0 STR R0,[R14,#flex__bkanchor-flex__ohead] ; --- Update the flags -- not compacted any more --- LDR R0,flex__flags ;Get my nice flags word BIC R0,R0,#fFlag__compact ;We are no longer compacted STR R0,flex__flags ;Store it back LDMFD R13!,{R0,R12,PC}^ ;Return to caller LTORG ; --- flex_alloc --- ; ; On entry: R0 == pointer to a flex anchor ; R1 == desired size of flex block ; ; On exit: Sapphire: CS if no memory could be allocated, CC otherwise ; APCS: R0 zero if no memory, nonzero otherwise ; ; Use: Allocates a block in the shifting heap. EXPORT flex_alloc flex_alloc ROUT [ OPT_APCS STMFD R13!,{R4,R5,R12,R14} ;Save some registers | STMFD R13!,{R0-R5,R12,R14} ;Save some registers ] WSPACE flex__wSpace ; --- Round up the size etc. --- MOV R4,R0 ;Keep the anchor pointer MOV R3,R1 ;Keep the actual size wanted ADD R5,R1,#flex__ohead+7 ;Add on overhead for flex BIC R5,R5,#7 ;And word-align the size ; --- See if there's enough space --- 00flex_alloc LDR R0,flex__free ;Get the free pointer LDR R2,flex__end ;And the end of the block SUB R1,R2,R0 ;How much room is left CMP R1,R5 ;Enough for the new block BGE %10flex_alloc ;Set up the block ; --- Not enough room in block - try to get some more --- ADD R0,R0,R5 ;Find the new slot limit BL flex__setslot ;Set up the new slot CMP R0,R1 ;Did we get enough STRGE R0,flex__end ;Yes -- remember the new end LDRGE R0,flex__free ;Find the free area again BGE %10flex_alloc ;And allocate the memory ; --- Can we compact the heap? --- LDR R0,flex__end ;Get the old heap extent BL flex__setslot ;Put the slot back again LDR R0,flex__flags ;Get my current flags TST R0,#fFlag__compact ;Is the heap compact? 
BLEQ flex_compact ;No -- really compact it BEQ %00flex_alloc ;And give it another go ; --- We couldn't get enough memory at all --- [ OPT_APCS MOV R0,#0 LDMFD R13!,{R4,R5,R12,PC}^ ;Restore registers | LDMFD R13!,{R0-R5,R12,R14} ;Restore registers ORRS PC,R14,#C_flag ;Set C flag to indicate fail ] ; --- Set up the block pointed to by R0 10flex_alloc STR R4,[R0,#flex__bkanchor] ;Set up the back anchor STR R3,[R0,#flex__size] ;Remember size of this block ADD R1,R0,#flex__ohead ;Point to real data block STR R1,[R4] ;Let user know where block is ADD R1,R0,R5 ;Get the new free pointer STR R1,flex__free ;Store the new free ptr ; --- Return to the user --- [ OPT_APCS MOV R0,#-1 LDMFD R13!,{R4,R5,R12,PC}^ | LDMFD R13!,{R0-R5,R12,R14} BICS PC,R14,#C_flag ] LTORG ; --- flex_size --- ; ; On entry: R0 == pointer to flex anchor ; ; On exit: R0 == size of allocated block ; ; Use: Reads the size of a flex block. EXPORT flex_size flex_size ROUT LDR R0,[R0] ;Get the flex block LDR R0,[R0,#flex__size-flex__ohead] MOVS PC,R14 LTORG ; --- flex_extend --- ; ; On entry: R0 == pointer to flex anchor ; R1 == new size of block to set ; ; On exit: CS if it failed due to lack of memory, CC otherwise ; ; Use: Alters the size of a block to the given value. EXPORT flex_extend flex_extend ROUT STMFD R13!,{R1,R2,R14} ; --- Be *very* careful to preserve the flags... --- MOV R2,R1 LDR R1,[R0] LDR R1,[R1,#flex__size-flex__ohead] SUB R2,R2,R1 BL flex_midExtend ; --- Note --- ; ; We preserved the flags above, and midExtend should do ; its bit to help, so we can just return with the flags ; set by midExtend. Easy, no? 
LDMFD R13!,{R1,R2,PC} LTORG ; --- flex_midExtend --- ; ; On entry: R0 == pointer to a flex anchor ; R1 == `at' -- position in block to extend from ; R2 == `by' -- how many bytes to extend (may be -ve) ; ; On exit: Sapphire: CS if not enough memory, CC otherwise ; APCS: R0 zero if not enough memory, nonzero otherwise ; ; Use: Either creates a gap in a block (by>0) or deletes bytes ; from a block. This is always done in such a way that the ; byte originally at offset `at' is now at offset `at'+`by'. EXPORT flex_midExtend EXPORT flex_midextend flex_midExtend ROUT flex_midextend ; --- A bit of clever setting up --- [ OPT_APCS TEQ R2,#0 ;Move by zero bytes? MOVEQ R0,#-1 ;Yes, it worked MOVEQS PC,R14 ;And return STMFD R13!,{R12,R14} ;Otherwise save registers | BIC R14,R14,#C_flag ;Clear C now STMFD R13!,{R12,R14} ;Save R14 on the stack TEQ R2,#0 ;Move by zero bytes? LDMEQFD R13!,{R12,PC}^ ;Yes -- don't bother then ] ; --- Save some more registers and find workspace --- WSPACE flex__wSpace ; --- Find out what we have to do depending on `by' --- CMP R2,#0 ;Is it +ve or -ve? BGT %50flex_midExtend ;If we must extend, do that ; --- We reduce the block size -- easy --- [ OPT_APCS STMFD R13!,{R4} | STMFD R13!,{R0-R4} ;Save some more registers ] MOV R3,R2 ;Keep the size safe MOV R14,R1 ;Keep `at' safe too LDR R4,[R0] ;Get the actual block ptr ADD R1,R4,R14 ;Copy from `at' ADD R0,R1,R2 ;Copy to `at'-|`by'| LDR R2,[R4,#flex__size-flex__ohead] SUBS R2,R2,R14 ;Size of block - `at' BLNE fastMove ;Copy the area down [ OPT_STACK BLNE flex__reloc ;And relocate the stack ] ; --- Find the new actual and logical sizes --- SUB R4,R4,#flex__ohead ;Point to my actual data LDR R0,[R4,#flex__size] ;Get the size of the block ADD R1,R0,R3 ;Find new adjusted size STR R1,[R4,#flex__size] ;This is new logical size ADD R0,R0,#flex__ohead+7 ;Add the overhead to both... ADD R1,R1,#flex__ohead+7 ;... of these sizes and... BIC R0,R0,#7 ;... align to heap... BIC R1,R1,#7 ;... 
granularity nicely ; --- If these are different, insert a free block --- SUBS R0,R0,R1 ;Is there space for free blk? BLE %00flex_midExtend ;No -- just wrap up nicely ; --- Insert the free block here --- ADD R2,R4,R1 ;Find the free block SUB R0,R0,#flex__ohead ;Subtract the overhead STR R0,[R2,#flex__size] ;And store `logical' size MOV R0,#0 ;This block has no owner STR R0,[R2,#flex__bkanchor] ;So store a null anchor ; --- There's a free block -- enable compaction --- LDR R1,flex__flags ;Load my current flags BIC R1,R1,#fFlag__compact ;We're not compact any more STR R1,flex__flags ;Store the flags back again ; --- Return to caller --- 00flex_midExtend [ OPT_APCS MOV R0,#-1 LDMFD R13!,{R4,R12,PC}^ | LDMFD R13!,{R0-R4,R12,PC}^ ;Smile, smile, smile ] ; --- Work out how much extra space we need --- 50flex_midExtend [ OPT_APCS STMFD R13!,{R4-R9} | STMFD R13!,{R0-R9} ;Save yet more registers ] ; --- Take copies of the arguments --- MOV R4,R0 MOV R5,R1 MOV R6,R2 LDR R7,[R4] ;Find the actual flex block SUB R7,R7,#flex__ohead ;Point to my data LDR R3,[R7,#flex__size] ;Find the size of the block ADD R2,R3,#7 ;Don't add in overhead BIC R2,R2,#7 ;How much space in this block SUB R1,R2,R3 ;R1 == dead space at the end SUBS R1,R6,R1 ;R1 == extra space needed ; --- Can we do it within the block? --- ADDLE R3,R3,R6 ;Increase the size STRLE R3,[R7,#flex__size] ;And store BLE %70flex_midExtend ;Yes -- just shuffle it about ; --- We need to find R1 more bytes from somewhere --- ; ; Our strategy here is fairly simple, really (although we ; could refine it a lot, I suppose). ; ; 1. Find as many free blocks at the end of the midExtend ; block as possible, join them all together and see if ; that will do. ; ; 2. If not, we just flex_alloc a block of the right size ; and shift everything skywards. 
; --- This calls for some serious register allocation --- ; ; R1 == the amount of extra we need (round up to size) ; R2 == pointer to blocks for loop ; ; R4 == address of anchor of midExtend block ; R5 == point at which we need to extend it ; R6 == how much we extend it by ; R7 == pointer to the actual block ; R8 == pointer to next block after midExtend one ; R9 == the size we've accumulated so far ; --- Start the loop (I want to get off) --- ADD R1,R1,#7 ;Align the size to multiple BIC R1,R1,#7 MOV R9,#0 ;We haven't found any yet ADD R8,R7,R2 ;Point almost to next block ADD R8,R8,#flex__ohead ;Point to it properly MOV R2,R8 ;Start the loop here LDR R3,flex__free ;Find the free area start ; --- Find free blocks now --- 55flex_midExtend CMP R2,R3 ;Are we at the end yet? BGE %65flex_midExtend ;Oh, well -- we're stuffed LDR R14,[R2,#flex__bkanchor] ;Is this block free? CMP R14,#0 ;Quick check McCheck BNE %65flex_midExtend ;Oh, well -- we're stuffed LDR R14,[R2,#flex__size] ;Get the block's size ADD R14,R14,#flex__ohead+7 ;Add on the overhead area BIC R14,R14,#7 ;And align to doubleword ADD R9,R9,R14 ;Accumulate the block size CMP R9,R1 ;Have we got enough yet? 
BGE %60flex_midExtend ;That's it -- we've got it ADD R2,R2,R14 ;Move onto the next block B %55flex_midExtend ;And check the next one ; --- We got enough free blocks to save us --- 60flex_midExtend LDR R0,[R7,#flex__size] ;Get the current size of this ADD R0,R0,R6 ;Add on the extended size STR R0,[R7,#flex__size] ;And put it back again BEQ %70flex_midExtend ;If perfect fit, don't create ; --- Create a new free block to use up the space --- ADD R0,R8,R1 ;Point to the new free block SUB R2,R9,R1 ;Find out how much is over SUB R2,R2,#flex__ohead ;Don't want to count that STR R2,[R0,#flex__size] ;Store the size away MOV R2,#0 ;This block is not used STR R2,[R0,#flex__bkanchor] ;So don't give it an anchor B %70flex_midExtend ;And do the shuffling about ; --- There wasn't enough space there, so allocate more --- 65flex_midExtend MOV R9,R1 ;Look after the size we want SUB R13,R13,#4 ;Create a flex anchor MOV R0,R13 ;Point to it SUB R1,R1,#flex__ohead ;We'll overwrite the overhead BL flex_alloc ;Allocate some memory [ OPT_APCS CMP R0,#0 ;Did it fail? ADDEQ R13,R13,#1 ;Skip past stacked anchor LDMEQFD R13!,{R4-R9,R12,PC}^ | LDMCSFD R13!,{R0-R9,R12,R14} ;If it failed, unstack... ADDCS R13,R13,#1 ;Skip past stacked anchor ORRCSS PC,R14,#C_flag ;... 
and set carry ] LDR R3,[R13],#4 ;Get pointer to new block ; --- A reminder about the registers --- ; ; R0-R2 aren't interesting any more ; R3 points flex__ohead bytes ahead of old flex__free ; R4-R6 are still our arguments ; R7,R8 aren't exciting any more ; R9 is the amount of extra space we wanted ; --- Our block may have moved -- recalculate next ptr --- LDR R7,[R4] ;Point to the block SUB R7,R7,#flex__ohead ;Point to the overhead area LDR R8,[R7,#flex__size] ;Get the size of the block ADD R8,R8,#flex__ohead+7 ;Bump over the overhead BIC R8,R8,#7 ;And align to multiple of 8 ; --- Move all the other blocks up a bit --- ADD R1,R7,R8 ;Start moving from here ADD R0,R1,R9 ;Move it to here SUB R2,R3,#flex__ohead ;Find the old flex__free SUBS R2,R2,R1 ;Copy the right section up BLNE fastMove [ OPT_STACK BLNE flex__reloc ;And adjust any stacked ptrs ] ; --- Adjust the block size and anchors --- LDR R14,[R7,#flex__size] ;Get the size of this block ADD R14,R14,R6 ;Add on the extension STR R14,[R7,#flex__size] ;Store it back again BL flex__fixup ;Fix up all the anchors again ; Drop through to block shuffling ; --- Create the gap in the flex block --- 70flex_midExtend LDR R4,[R7,#flex__size] ;Get the size of the block SUB R4,R4,R6 ;Find the old length ADD R1,R7,#flex__ohead ;Point to the real data ADD R1,R1,R5 ;Find the `at' position ADD R0,R1,R6 ;Find the `at'+|`by'| posn SUBS R2,R4,R5 ;Find the length to move BLNE fastMove ;Do the move [ OPT_STACK BLNE flex__reloc ;And adjust any stacked ptrs ] [ OPT_APCS MOV R0,#-1 LDMFD R13!,{R4-R9,R12,PC}^ | LDMFD R13!,{R0-R9,R12,PC}^ ] LTORG ; --- flex_init --- ; ; On entry: R0 == pointer to dynamic area name, or zero ; R1 == maximum allowed size of the area ; (except Sapphire version) ; ; On exit: -- ; ; Use: Initialises the flex heap for use. 
	EXPORT	flex_init
	EXPORT	flex_dinit
flex_init ROUT

 [ OPT_STEEL
	MOV	R0,#0			;STEEL: never a dynamic area
	MOV	R1,#1
 ]
flex_dinit
	STMFD	R13!,{R0-R8,R12,R14}
	WSPACE	flex__wSpace

 [ :LNOT:OPT_STANDALONE

	; --- Prevent multiple initialisation ---

	LDR	R14,flex__flags		;Find my flags word
	TST	R14,#fFlag__inited	;Am I initialised yet?
	LDMNEFD	R13!,{R0-R8,R12,PC}^	;Yes -- return right now
 ]

 [ OPT_DYNAREA

	; --- If this is Sapphire, find the options block ---

 [ OPT_SAPPHIRE
	BL	rov_init		;Work out the RISC OS version
	BL	rov_version		;Get the version
	CMP	R0,#348			;Is this RISC OS 3.5?
	BCC	%10flex_init		;No -- skip ahead then
	LDR	R0,flex__optName	;Get the magic marker word
	BL	libOpts_find		;Try to find the options
	BCC	%10flex_init		;Not there -- skip on then
	LDR	R14,[R0,#0]		;Load the flags out
	TST	R14,#1			;Is the dynamic area flag on?
	BEQ	%10flex_init		;No -- don't do this then

	; --- See if we can create a dynamic area ---

	TST	R14,#2			;Specified area size?
	LDRNE	R5,[R0,#4]		;Yes -- load from opts block
	MOVEQ	R5,#16*1024*1024	;16 meg maximum size
	LDR	R8,sapph_appName	;Find the application name
 |

	; --- If this is APCS, then use the arguments ---

	CMP	R0,#0			;Is a dynamic area wanted?
	CMPNE	R1,#0			;Just check for stupidity
	BEQ	%10flex_init		;No -- then skip ahead
	MOV	R8,R0			;Find the name pointer
	MOV	R5,R1			;And the size requested
	MOV	R0,#129			;Find the OS version
	MOV	R1,#0			;Convoluted call for this...
	MOV	R2,#255			;No idea why
	SWI	OS_Byte			;Call the operating system
	CMP	R1,#&A5			;Is it late enough?
	BCC	%10flex_init		;No -- ignore the request
 ]

	; --- Create a dynamic area ---

	MOV	R0,#0			;Create new dynamic area
	MOV	R1,#-1			;Give me any old number
	MOV	R2,#0			;Zero size initially
	MOV	R3,#-1			;Don't care about base addr
	MOV	R4,#(1<<7)		;Don't let user drag the bar
	MOV	R6,#0			;No dynamic area handler
	MOV	R7,#0			;I wuz told to do this
	SWI	XOS_DynamicArea		;Try to create the area
					;(R5 = max size, R8 = name)
	BVS	%10flex_init		;It failed -- use WimpSlot

	; --- Set up workspace for this ---

	STR	R3,flex__base		;Store base of the area
	STR	R3,flex__free		;The first free part
	STR	R3,flex__end		;And the end
	STR	R1,flex__dynArea	;Save dynamic area handle
	MOV	R0,#fFlag__compact + fFlag__inited + fFlag__dynArea
	STR	R0,flex__flags		;Store the appropriate flags

	; --- Add in tidy-up routine to delete the area ---

 [ OPT_SAPPHIRE
	BL	except_init		;We need to clear it up
	ADR	R0,flex__exit		;Point to exit handler
	MOV	R1,R12			;Pass it my workspace
	BL	except_atExit		;Register the routine
 ]
 [ OPT_ATEXIT
	ADR	R0,flex_die		;Point to exit handler
	BL	atexit			;And call that
 ]
	B	%20flex_init		;And continue initialisation
 ]

	; --- Find out the slot size ---

10flex_init
	MOV	R0,#-1			;Read current slot size
	BL	flex__setslot		;Do the slot thing

	; --- Store initial heap information ---

	STR	R0,flex__base		;The start of the heap
	STR	R0,flex__free		;The first free part
	STR	R0,flex__end		;And the end
	MOV	R0,#fFlag__compact + fFlag__inited ;Empty heaps is compact heaps
	STR	R0,flex__flags

	; --- Get the page size of the machine ---

20flex_init
	SWI	OS_ReadMemMapInfo	;Get page size (in R0)
	STR	R0,flex__chunk		;Store for future reference

	; --- Set up the flex relocation stack ---

 [ OPT_STACK
	ADR	R14,flex__relocStk	;Point to the stack base
	STR	R14,flex__relocSP	;Save this as initial SP
 ]

	; --- Register the flex compactor as a postfilter ---

 [ OPT_SAPPHIRE
	BL	event_init		;Initialise event system
	ADR	R0,flex__preFilter	;Point to the prefilter
	MOV	R1,R12			;Give it my workspace ptr
	BL	event_preFilter		;Add the filter into the list
	ADR	R0,flex__postFilter	;Point to the postfilter
	MOV	R1,R12			;Give it my workspace ptr
	BL	event_postFilter	;Add the filter into the list
 ]
	LDMFD	R13!,{R0-R8,R12,PC}^

 [ OPT_SAPPHIRE
flex__optName DCB "FLEX"		;libOpts marker word
 ]

	LTORG

 [ OPT_SAPPHIRE
flex__wSpace DCD 0			;Workspace ptr, filled in late
 ]
 [ OPT_APCS
flex__wSpace DCD flex__sSpace		;Static workspace address
 ]

; --- flex__preFilter ---
;
; On entry:	R0 == WIMP event mask
;		R1 == pointer to event block
;		R2 == time to return, or 0
;		R3 == pointer to poll word if necessary
;
; On exit:	R0,R2 maybe updated to enable idle events
;
; Use:		Enables full idle events if the flex heap needs compacting.

 [ OPT_SAPPHIRE
flex__preFilter ROUT

	STMFD	R13!,{R14}		;Save a register
	LDR	R14,flex__flags		;Find the flags word
	TST	R14,#fFlag__compact	;Is the heap compacted?
	BICEQ	R0,R0,#1		;No -- unmask idle events
	MOVEQ	R2,#0			;And return immediately
	LDMFD	R13!,{PC}^		;Return to caller

	LTORG
 ]

; --- flex__postFilter ---
;
; On entry:	R0 == WIMP reason code
;		R1 == pointer to event block
;		R2 == time to return or 0
;		R3 == pointer to poll word or nothing really
;
; On exit:	Everything must be preserved
;
; Use:		Compacts the flex heap every idle event.

 [ OPT_SAPPHIRE
flex__postFilter ROUT

	CMP	R0,#0			;Is this an idle event?
	MOVNES	PC,R14			;No -- then return right now
	STMFD	R13!,{R0}		;Save a register
	LDR	R0,flex__flags		;Find the flags word
	TST	R0,#fFlag__compact	;Is the heap compacted?
	LDMFD	R13!,{R0}		;Restore the register's value
	MOVNES	PC,R14			;Return if it is
	B	flex__compact		;Go give the heap a nudge
					;(tail call -- flex__compact
					;returns to our caller)

	LTORG
 ]

 [ OPT_APCS

; --- flex_budge / flex_dont_budge ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Nothing.  Both of these do the same thing.

	EXPORT	flex_budge
	EXPORT	flex_dont_budge
flex_budge ROUT
flex_dont_budge
	MOV	R0,#0			;Refuse the budge
	MOVS	PC,R14			;And return
 ]

; --- flex_die / flex__exit ---
;
; On entry:	--
;
; On exit:	--
;
; Use:		Kills the dynamic area which we own.
 [ OPT_DYNAREA
 [ OPT_SAPPHIRE
flex__exit ROUT
 |
	EXPORT	flex_die
flex_die ROUT
 ]
	STMFD	R13!,{R0,R1,R14}	;Save some registers

	; --- The C library's `atexit' doesn't provide context ---

 [ :LNOT:OPT_SAPPHIRE
	WSPACE	flex__wSpace
 ]

	; --- Now free the dynamic area ---

	LDR	R1,flex__dynArea	;Load the handle
	CMP	R1,#0			;Is it defined?
	MOVNE	R0,#1			;Yes -- remove it
	SWINE	OS_DynamicArea		;Do the remove job
	MOVNE	R0,#0			;Now clear the handle
	STRNE	R0,flex__dynArea	;So we don't do it again
	LDMFD	R13!,{R0,R1,PC}^	;Finally, return to caller

	LTORG
 ]

 [ OPT_STACK

; --- flex_stackPtr ---
;
; On entry:	R0 == 0 to read, or value to set
;
; On exit:	R0 == old value
;
; Use:		Either reads or writes the flex stack pointer.  This sort
;		of thing is useful in exception handlers etc.

	EXPORT	flex_stackPtr
flex_stackPtr ROUT

	STMFD	R13!,{R12,R14}		;Save some registers
	WSPACE	flex__wSpace		;Find the workspace
	LDR	R14,flex__relocSP	;Load the current value
	CMP	R0,#0			;Does he want to write it?
	STRNE	R0,flex__relocSP	;Yes -- then write it
	MOV	R0,R14			;Return the old value
	LDMFD	R13!,{R12,PC}^		;And return to caller

	LTORG

; --- flex_save ---
;
; On entry:	R0 == value to save, for APCS
;
; On exit:	--
;
; Use:		Saves some registers on the flex relocation stack.  R13
;		and R14 cannot be saved -- these registers are corrupted
;		during this routine's execution.
;
;		Values saved on the flex relocation stack are adjusted as
;		flex moves blocks of memory around, so that they still point
;		to the same thing as they did before.  Obviously, values
;		which aren't pointers into flex blocks may be corrupted.
;		Values pointing to objects deleted (either free blocks, or
;		areas removed by flex_midExtend) may also be corrupted.
;
;		Since this routine takes no arguments, some other method has
;		to be used.  The method chosen is to follow the call to
;		flex_save with a LDM or STM instruction containing the
;		registers to be saved.  This instruction is skipped by the
;		routine, and thus not executed.
;
;		Note that if you give the LDM or STM the same condition code
;		as the BL preceding it, it will never be executed, since
;		flex_save skips it if the condition is true and it can't be
;		executed if the condition is false.
;
;		(All the above is only true for the Sapphire version.)

	EXPORT	flex_save
flex_save ROUT

 [ :LNOT:OPT_APCS

	; --- StrongARM friendly version 1st October 1996 [mdw] ---
	;
	; Reads the register list from the LDM/STM *following* the call
	; site (data-side read, hence StrongARM split-cache safe) and
	; pushes each named register on the relocation stack.

	STMFD	R13!,{R10,R11,R12,R14}	;Save some registers away
	BIC	R10,R14,#&FC000003	;Clear processor flags
					;(26-bit: R14 carries PSR bits)
	WSPACE	flex__wSpace		;Locate flex's workspace
	LDR	R11,flex__relocSP	;Load the stack pointer
	LDR	R10,[R10,#0]		;Load the instruction out

	; --- Rather optimised code ---
	;
	; Shift two bits at a time into C and N.  Leave early if
	; possible.

	TST	R10,#&03F
	BEQ	%f05
	MOVS	R14,R10,LSL #31
	STRMI	R0,[R11],#4
	STRCS	R1,[R11],#4
	MOVS	R14,R10,LSL #29
	STRMI	R2,[R11],#4
	STRCS	R3,[R11],#4
	TST	R10,#&FF0
	BEQ	%f05
	MOVS	R14,R10,LSL #27
	STRMI	R4,[R11],#4
	STRCS	R5,[R11],#4
	TST	R10,#&FC0
	BEQ	%f00
05
	MOVS	R14,R10,LSL #25
	STRMI	R6,[R11],#4
	STRCS	R7,[R11],#4
	MOVS	R14,R10,LSL #23
	STRMI	R8,[R11],#4
	STRCS	R9,[R11],#4
	TST	R10,#&C00
	BEQ	%f00
	MOVS	R14,R10,LSL #21
	LDRMI	R14,[R13,#0]		;R10 was stacked on entry...
	STRMI	R14,[R11],#4		;...so fetch it from there
	LDRCS	R14,[R13,#4]		;Likewise R11
	STRCS	R14,[R11],#4
00
	; --- Tidy up and return home ---

	STR	R11,flex__relocSP	;Store new stack ptr
	LDMFD	R13!,{R10,R11,R12,R14}	;And return to caller
	ADDS	PC,R14,#4		;Skip the LDM/STM we decoded
 |
	STMFD	R13!,{R12,R14}		;Save registers
	WSPACE	flex__wSpace		;Find my workspace
	LDR	R14,flex__relocSP	;Load the current pointer
	STR	R0,[R14],#4		;Store the value away
	STR	R14,flex__relocSP	;Store the stack pointer
	LDMFD	R13!,{R12,PC}^		;And return to caller
 ]

	LTORG

; --- flex_load ---
;
; On entry:	--
;
; On exit:	Registers loaded from relocation stack as requested
;
; Use:		Restores registers saved on flex's relocation stack.  See
;		flex_save for calling information and details about the
;		relocation stack.

	EXPORT	flex_load
flex_load ROUT

 [ :LNOT:OPT_APCS

	; --- StrongARM friendly version 1st October 1996 [mdw] ---

	STMFD	R13!,{R10,R11,R12,R14}	;Save some registers away
	BIC	R10,R14,#&FC000003	;Clear processor flags
	WSPACE	flex__wSpace		;Locate flex's workspace
	LDR	R11,flex__relocSP	;Load the stack pointer
	LDR	R10,[R10,#0]		;Load the instruction out

	; --- Rather optimised code ---
	;
	; Shift two bits at a time into C and N.  Leave early if
	; possible.  Do it backwards, because otherwise it doesn't
	; work (pops must mirror flex_save's push order).

	TST	R10,#&FF0
	BEQ	%f05
	MOVS	R14,R10,LSL #21
	LDRCS	R14,[R11,#-4]!
	STRCS	R14,[R13,#4]		;R11 restored via our LDM
	LDRMI	R14,[R11,#-4]!
	STRMI	R14,[R13,#0]		;R10 restored via our LDM
	MOVS	R14,R10,LSL #23
	LDRCS	R9,[R11,#-4]!
	LDRMI	R8,[R11,#-4]!
	TST	R10,#&0FF
	BEQ	%f00
	MOVS	R14,R10,LSL #25
	LDRCS	R7,[R11,#-4]!
	LDRMI	R6,[R11,#-4]!
	TST	R10,#&03F
	BEQ	%f00
	MOVS	R14,R10,LSL #27
	LDRCS	R5,[R11,#-4]!
	LDRMI	R4,[R11,#-4]!
	TST	R10,#&00F
	BEQ	%f00
05
	MOVS	R14,R10,LSL #29
	LDRCS	R3,[R11,#-4]!
	LDRMI	R2,[R11,#-4]!
	MOVS	R14,R10,LSL #31
	LDRCS	R1,[R11,#-4]!
	LDRMI	R0,[R11,#-4]!
00
	; --- Tidy up and return home ---

	STR	R11,flex__relocSP	;Store new stack ptr
	LDMFD	R13!,{R10,R11,R12,R14}	;And return to caller
	ADDS	PC,R14,#4		;Skip the LDM/STM we decoded
 |
	STMFD	R13!,{R12,R14}		;Save registers
	WSPACE	flex__wSpace		;Find my workspace
	LDR	R14,flex__relocSP	;Load the current pointer
	LDR	R0,[R14,#-4]!		;Load the value out
	STR	R14,flex__relocSP	;Store the stack pointer
	LDMFD	R13!,{R12,PC}^		;And return to caller
 ]

	LTORG

; --- flex__reloc ---
;
; On entry:	R0 == destination pointer of move
;		R1 == source pointer of move
;		R2 == length of block to move
;
; On exit:	--
;
; Use:		Relocates the flex stack after a heap operation which moved
;		memory.  The arguments are intentionally the same as those
;		for fastMove, which should be called immediately before this
;		routine.

flex__reloc ROUT

	STMFD	R13!,{R3,R4,R14}	;Save some registers

	; --- Set up initial values ---

	ADR	R3,flex__relocStk	;Point to the flex stack base
	LDR	R4,flex__relocSP	;Load the current stack ptr

	; --- Go through all the stack entries ---

00flex__reloc
	CMP	R3,R4			;Have we reached the end?
	LDMGEFD	R13!,{R3,R4,PC}^	;Yes -- return to caller
	LDR	R14,[R3],#4		;Load the saved value
	SUB	R14,R14,R1		;Subtract the source base
	CMP	R14,R2			;Is value within block?
					;(unsigned compare doubles as
					;a >= 0 check)
	ADDLO	R14,R14,R0		;Yes -- add the destination
	STRLO	R14,[R3,#-4]		;Store pointer if it changed
	B	%00flex__reloc		;And go round for the rest

	LTORG
 ]

; --- flex_dump ---
;
; Use:		Debugging aid: writes the heap layout and each block's
;		address, size and anchor to the VDU stream.

 [ OPT_DUMP
	EXPORT	flex_dump
flex_dump ROUT

	STMFD	R13!,{R0-R12,R14}
	SWI	XOS_NewLine
	SWI	XOS_NewLine
	SWI	XOS_Write0		;NOTE(review): R0 here is the
					;caller's -- looks like a lost
					;title string; confirm
	SWI	XOS_NewLine
	LDR	R11,=flex__data		;NOTE(review): flex__data is
					;not defined in this file --
					;external workspace? confirm
	LDR	R5,[R11,#flex__base]
	LDR	R6,[R11,#flex__free]
	LDR	R7,[R11,#flex__end]
	SWI	XOS_WriteS		;NOTE(review): the DCB strings
					;below are not ALIGNed -- the
					;originals were presumably
					;padded to a word; confirm
	DCB	"Heap base: ",0
	MOV	R0,R5
	BL	writeHex
	SWI	XOS_WriteS
	DCB	13,10,"Heap free area: ",0
	MOV	R0,R6
	BL	writeHex
	SWI	XOS_WriteS
	DCB	13,10,"Heap end: ",0
	MOV	R0,R7
	BL	writeHex
00
	CMP	R5,R6			;Walk blocks base..free
	LDMGEFD	R13!,{R0-R12,PC}^
	SWI	XOS_WriteS
	DCB	13,10,10,"Block address: ",0
	MOV	R0,R5
	BL	writeHex
	SWI	XOS_WriteS
	DCB	13,10," Size: ",0
	LDR	R0,[R5,#flex__size]
	BL	writeHex
	SWI	XOS_WriteS
	DCB	13,10," Anchor: ",0
	LDR	R0,[R5,#flex__bkanchor]
	BL	writeHex
	LDR	R0,[R5,#flex__size]	;Step by rounded size+ohead
	ADD	R0,R0,#flex__ohead+7
	BIC	R0,R0,#7
	ADD	R5,R5,R0
	B	%00

writeHex				;Print R0 in hex, via a scratch
					;buffer below the stack
	STMFD	R13!,{R1,R2,R14}
	SUB	R1,R13,#256
	MOV	R2,#256
	SWI	XOS_ConvertHex8
	SWI	XOS_Write0
	LDMFD	R13!,{R1,R2,PC}^

writeDec				;Print R0 in decimal (unused
					;at present)
	STMFD	R13!,{R1,R2,R14}
	SUB	R1,R13,#256
	MOV	R2,#256
	SWI	XOS_ConvertInteger4
	SWI	XOS_Write0
	LDMFD	R13!,{R1,R2,PC}^

	LTORG
 ]

;----- Workspace ------------------------------------------------------------

; --- Flags ---

fFlag__inited EQU (1<<0)		;We are currently running
fFlag__compact EQU (1<<1)		;The heap is compact ATM
 [ OPT_DYNAREA
fFlag__dynArea EQU (1<<2)		;Using a dynamic area
 ]

; --- Flex block format ---

	^	0
flex__bkanchor # 4			;Back-pointer to flex anchor
flex__size # 4				;Size of this flex block
flex__ohead # 0				;The flex overhead on blocks

; --- Static data ---

 [ :LNOT:OPT_STANDALONE
	^	0,R12			;R12-relative workspace map
flex__wStart # 0
 [ OPT_DYNAREA
	GBLL	FLEXWS_DYNAREA		;Tell flexws to reserve space
 ]
 [ OPT_STACK
	GBLL	FLEXWS_STACK		;Tell flexws to reserve space
 ]
	GET	libs:sh.flexws		;Shared workspace layout
flex__wSize EQU {VAR}-flex__wStart
 ]

 [ OPT_SAPPHIRE
	AREA	|Sapphire$$LibData|,CODE,READONLY

	DCD	flex__wSize		;Workspace size needed
	DCD	flex__wSpace		;Where to put its address
	DCD	0
	DCD	flex_init		;Init entry point
 ]

 [ OPT_APCS
	AREA	|C$$zidata|,DATA,NOINIT

flex__sSpace % flex__wSize		;Zero-initialised workspace
 ]

;----- That's all, folks ----------------------------------------------------

	END