[Gmp-commit] /var/hg/gmp: Add ARM v6t2 divrem_1.

mercurial at gmplib.org
Sun Apr 29 00:03:35 CEST 2012


details:   /var/hg/gmp/rev/f331c6cb447a
changeset: 14904:f331c6cb447a
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Sun Apr 29 00:03:29 2012 +0200
description:
Add ARM v6t2 divrem_1.
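
For context: mpn_divrem_1 divides the un-limb number {up, un} by the single
limb d, writes the quotient limbs to qp, and returns the remainder.  Below is
a minimal C sketch of those semantics, assuming 32-bit limbs and omitting the
fraction-limb argument fn (which develops fn extra low quotient limbs);
divrem_1_ref is a hypothetical name, not GMP's API.

    #include <stdint.h>

    typedef uint32_t mp_limb_t;	/* 32-bit limbs, as on ARM */

    /* Schoolbook reference: one quotient limb per step, starting from
       the most significant end.  */
    static mp_limb_t
    divrem_1_ref (mp_limb_t *qp, const mp_limb_t *up, int n, mp_limb_t d)
    {
      uint64_t r = 0;	/* running remainder, always < d */
      int i;
      for (i = n - 1; i >= 0; i--)
        {
          uint64_t t = (r << 32) | up[i];	/* fits: r < d <= 2^32-1 */
          qp[i] = (mp_limb_t) (t / d);
          r = t % d;
        }
      return (mp_limb_t) r;
    }

ARM v6t2 has no integer divide instruction, so the code below replaces t / d
with multiplication by a precomputed reciprocal of d; see the note after the
diff.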

diffstat:

 ChangeLog                 |    4 +
 mpn/arm/v6t2/divrem_1.asm |  201 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 205 insertions(+), 0 deletions(-)

diffs (216 lines):

diff -r 2229dbbb2b06 -r f331c6cb447a ChangeLog
--- a/ChangeLog	Sat Apr 28 20:07:15 2012 +0200
+++ b/ChangeLog	Sun Apr 29 00:03:29 2012 +0200
@@ -1,3 +1,7 @@
+2012-04-29  Torbjorn Granlund  <tege at gmplib.org>
+
+	* mpn/arm/v6t2/divrem_1.asm: New file.
+
 2012-04-28  Torbjorn Granlund  <tege at gmplib.org>
 
 	* mpn/thumb/add_n.asm: New file.
diff -r 2229dbbb2b06 -r f331c6cb447a mpn/arm/v6t2/divrem_1.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/arm/v6t2/divrem_1.asm	Sun Apr 29 00:03:29 2012 +0200
@@ -0,0 +1,201 @@
+dnl  ARM v6t2 mpn_divrem_1 and mpn_preinv_divrem_1
+
+dnl  Contributed to the GNU project by Torbjorn Granlund.
+
+dnl  Copyright 2012 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C		norm	unorm	frac
+C StrongARM	 ?
+C XScale	 ?
+C Cortex-A8	 ?
+C Cortex-A9	 13	 14	 13
+C Cortex-A15	 ?
+
+C TODO
+C  * Optimise the inner loops; they could likely run a cycle or two faster.
+C  * Decrease register usage, streamline non-loop code.
+
+
+define(`qp_arg',  `r0')
+define(`fn',      `r1')
+define(`up_arg',  `r2')
+define(`n_arg',   `r3')
+define(`d_arg',   `0')
+define(`dinv_arg',`4')
+define(`cnt_arg', `8')
+
+define(`n',       `r9')
+define(`qp',      `r5')
+define(`up',      `r6')
+define(`cnt',     `r7')
+define(`tnc',     `r10')
+define(`dinv',    `r0')
+define(`d',       `r4')
+
+ASM_START()
+PROLOGUE(mpn_preinv_divrem_1)
+	stmfd	sp!, {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	ldr	d,    [sp, #9*4+d_arg]
+	ldr	cnt,  [sp, #9*4+cnt_arg]
+	str	r1, [sp, #9*4+d_arg]	C reuse d stack slot for fn
+	sub	n, r3, #1		C n = un - 1
+	add	r3, r1, n		C qn - 1 = fn + un - 1
+	cmp	d, #0			C top bit set means d normalised
+	add	qp, qp_arg, r3, lsl #2	C put qp at Q[] end
+	add	up, up_arg, n, lsl #2	C put up at U[] end
+	ldr	dinv, [sp, #9*4+dinv_arg]
+	blt	L(nent)			C d already normalised
+	b	L(uent)
+EPILOGUE()
+
+PROLOGUE(mpn_divrem_1)
+	stmfd	sp!, {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+	sub	n, r3, #1		C n = un - 1
+	ldr	d, [sp, #9*4+d_arg]	C d
+	str	r1, [sp, #9*4+d_arg]	C reuse d stack slot for fn
+	add	r3, r1, n		C qn - 1 = fn + un - 1
+	cmp	d, #0			C top bit set means d normalised
+	add	qp, qp_arg, r3, lsl #2	C put qp at Q[] end
+	add	up, up_arg, n, lsl #2	C put up at U[] end
+	blt	L(normalised)
+
+L(unnorm):
+	clz	cnt, d
+	mov	r0, d, lsl cnt		C pass d << cnt
+	bl	mpn_invert_limb
+L(uent):
+	mov	d, d, lsl cnt		C d <<= cnt
+	cmp	n, #0			C n = un - 1 here
+	mov	r1, #0			C r
+	blt	L(frac)			C un == 0: all fraction limbs
+
+	ldr	r11, [up, #0]		C top dividend limb
+
+	rsb	tnc, cnt, #32		C tnc = 32 - cnt
+	mov	r1, r11, lsr tnc	C r = top limb's high cnt bits
+	mov	r11, r11, lsl cnt
+	beq	L(uend)			C un == 1
+
+	ldr	r3, [up, #-4]!
+	orr	r2, r11, r3, lsr tnc	C next shifted dividend limb
+	b	L(mid)
+
+L(utop):
+	mls	r1, d, r8, r11		C r = u - q1 * d, candidate rem
+	mov	r11, r3, lsl cnt
+	ldr	r3, [up, #-4]!
+	cmp	r1, r2			C candidate rem vs q0
+	addhi	r1, r1, d		C adjust: r += d, q1 -= 1
+	subhi	r8, r8, #1
+	orr	r2, r11, r3, lsr tnc
+	cmp	r1, d
+	bcs	L(ufx)			C rare fix-up when r >= d
+L(uok):	str	r8, [qp], #-4
+L(mid):	add	r8, r1, #1		C seed q1 = r + 1
+	mov	r11, r2
+	umlal	r2, r8, r1, dinv	C {q1,q0} = {r+1,u} + r * dinv
+	subs	n, n, #1
+	bne	L(utop)
+
+	mls	r1, d, r8, r11
+	mov	r11, r3, lsl cnt
+	cmp	r1, r2
+	addhi	r1, r1, d
+	subhi	r8, r8, #1
+	cmp	r1, d
+	rsbcs	r1, d, r1
+	addcs	r8, r8, #1
+	str	r8, [qp], #-4
+
+L(uend):add	r8, r1, #1
+	mov	r2, r11
+	umlal	r2, r8, r1, dinv
+	mls	r1, d, r8, r11
+	cmp	r1, r2
+	addhi	r1, r1, d
+	subhi	r8, r8, #1
+	cmp	r1, d
+	rsbcs	r1, d, r1
+	addcs	r8, r8, #1
+	str	r8, [qp], #-4
+L(frac):
+	ldr	r2, [sp, #9*4+d_arg]	C fn
+	cmp	r2, #0
+	beq	L(fend)
+
+L(ftop):mov	r6, #0			C fraction limbs divide u = 0
+	add	r3, r1, #1		C seed q1 = r + 1
+	umlal	r6, r3, r1, dinv	C {q1,q0} = {r+1,0} + r * dinv
+	mov	r8, #0
+	mls	r1, d, r3, r8		C r = 0 - q1 * d, candidate rem
+	cmp	r1, r6
+	addhi	r1, r1, d
+	subhi	r3, r3, #1
+	subs	r2, r2, #1
+	str	r3, [qp], #-4
+	bne	L(ftop)
+
+L(fend):mov	r11, r1, lsr cnt	C undo normalisation shift
+L(rtn):	mov	r0, r11
+	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+
+L(normalised):
+	mov	r0, d
+	bl	mpn_invert_limb
+L(nent):
+	cmp	n, #0
+	mov	r11, #0			C r
+	blt	L(nend)
+
+	ldr	r11, [up, #0]
+	cmp	r11, d
+	movlo	r2, #0			C hi q limb
+	movhs	r2, #1			C hi q limb
+	subhs	r11, r11, d
+
+	str	r2, [qp], #-4
+	cmp	n, #0
+	beq	L(nend)
+
+L(ntop):ldr	r1, [up, #-4]!		C next lower dividend limb
+	add	r12, r11, #1		C seed q1 = r + 1
+	umlal	r1, r12, r11, dinv	C {q1,q0} = {r+1,u} + r * dinv
+	ldr	r3, [up, #0]
+	mls	r11, d, r12, r3		C r = u - q1 * d, candidate rem
+	cmp	r11, r1			C candidate rem vs q0
+	addhi	r11, r11, d		C adjust: r += d, q1 -= 1
+	subhi	r12, r12, #1
+	cmp	d, r11
+	bls	L(nfx)			C rare fix-up when r >= d
+L(nok):	str	r12, [qp], #-4
+	subs	n, n, #1
+	bne	L(ntop)
+
+L(nend):mov	r1, r11			C r
+	mov	cnt, #0			C shift cnt
+	b	L(frac)
+
+L(nfx):	add	r12, r12, #1		C q1 += 1
+	rsb	r11, d, r11		C r -= d
+	b	L(nok)
+L(ufx):	rsb	r1, d, r1		C r -= d
+	add	r8, r8, #1		C q1 += 1
+	b	L(uok)
+EPILOGUE()
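
The inner loops (L(utop)/L(mid), L(ntop), L(ftop)) implement the 2-by-1
division step with a precomputed reciprocal, in the style of Möller and
Granlund: for normalised d (top bit set), mpn_invert_limb returns
dinv = floor((B^2 - 1) / d) - B with B = 2^32.  Each iteration forms a
candidate quotient with umlal and a candidate remainder with mls, then makes
both exact with at most two conditional adjustments; the second adjustment is
the rarely taken L(ufx)/L(nfx) path.  Here is a C sketch of one step under
those assumptions (invert_limb_ref and div2by1_preinv are hypothetical names;
the portable form in GMP is the udiv_qrnnd_preinv macro):

    #include <stdint.h>

    typedef uint32_t mp_limb_t;

    /* Reference reciprocal for normalised d (d >= 2^31).  */
    static mp_limb_t
    invert_limb_ref (mp_limb_t d)
    {
      return (mp_limb_t) (UINT64_MAX / d - ((uint64_t) 1 << 32));
    }

    /* Divide {h,l} by normalised d, given h < d (the running remainder)
       and dinv = invert_limb_ref (d).  The 32-bit operations wrap mod B,
       exactly as the corresponding ARM instructions do.  */
    static mp_limb_t
    div2by1_preinv (mp_limb_t *q, mp_limb_t h, mp_limb_t l,
                    mp_limb_t d, mp_limb_t dinv)
    {
      /* h < d keeps this sum below B^2, so no 64-bit overflow.  */
      uint64_t t = (uint64_t) h * dinv + (((uint64_t) h << 32) | l);
      mp_limb_t q1 = (mp_limb_t) (t >> 32) + 1;	/* add+umlal in the asm */
      mp_limb_t q0 = (mp_limb_t) t;
      mp_limb_t r = l - q1 * d;			/* mls in the asm */
      if (r > q0)				/* cmp; addhi; subhi */
        {
          q1--;
          r += d;
        }
      if (r >= d)				/* cmp; L(ufx)/L(nfx) */
        {
          q1++;
          r -= d;
        }
      *q = q1;
      return r;
    }

For the unnormalised entry, the operands are first shifted left by
cnt = clz(d) so that d is normalised, and L(fend) shifts the remainder back
down before returning.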

