llvm / Hercules

Commit 56777804, authored 1 year ago by rarbore2

    iterate loops bottom-up for forkify

parent 8ac51ec6
Showing 2 changed files, with 37 additions and 12 deletions:

  hercules_ir/src/loops.rs     +24 −0   (24 additions, 0 deletions)
  hercules_opt/src/forkify.rs  +13 −12  (13 additions, 12 deletions)
hercules_ir/src/loops.rs  +24 −0

@@ -2,6 +2,7 @@ extern crate bitvec;
 use std::collections::hash_map;
 use std::collections::HashMap;
+use std::collections::VecDeque;
 use self::bitvec::prelude::*;
@@ -34,6 +35,29 @@ impl LoopTree {
     pub fn loops(&self) -> hash_map::Iter<'_, NodeID, (BitVec<u8, Lsb0>, NodeID)> {
         self.loops.iter()
     }
 
+    /*
+     * Sometimes, we need to iterate the loop tree bottom-up. Just assemble the
+     * order upfront.
+     */
+    pub fn bottom_up_loops(&self) -> Vec<(NodeID, &BitVec<u8, Lsb0>)> {
+        let mut bottom_up = vec![];
+        let mut children_count: HashMap<NodeID, u32> =
+            self.loops.iter().map(|(k, _)| (*k, 0)).collect();
+        children_count.insert(self.root, 0);
+        for (_, (_, parent)) in self.loops.iter() {
+            *children_count.get_mut(&parent).unwrap() += 1;
+        }
+        let mut worklist: VecDeque<_> = self.loops.iter().map(|(k, v)| (*k, &v.0)).collect();
+        while let Some(pop) = worklist.pop_front() {
+            if children_count[&pop.0] == 0 {
+                *children_count.get_mut(&self.loops[&pop.0].1).unwrap() -= 1;
+                bottom_up.push(pop);
+            } else {
+                worklist.push_back(pop);
+            }
+        }
+        bottom_up
+    }
 }
 
 /*
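For context on the new method: bottom_up_loops assembles a bottom-up order over the loop-nesting tree, so a loop is only visited after every loop nested inside it, which is what forkify (next file) relies on. The scheme is a Kahn-style worklist keyed on per-node child counts. Below is a minimal, self-contained sketch of that scheme over a plain parent map; bottom_up_order, parent_of, and the integer IDs are hypothetical stand-ins used for illustration, not Hercules APIs.

use std::collections::{HashMap, VecDeque};

// Kahn-style bottom-up ordering over a parent map: a node is emitted only
// after all of its children have been emitted, so inner nodes come first.
fn bottom_up_order(parent_of: &HashMap<u32, u32>, root: u32) -> Vec<u32> {
    // Count the children of every node (the root included) so leaves start at 0.
    let mut children_count: HashMap<u32, u32> = parent_of.keys().map(|k| (*k, 0)).collect();
    children_count.insert(root, 0);
    for parent in parent_of.values() {
        *children_count.get_mut(parent).unwrap() += 1;
    }

    // Rotate a worklist until every node has seen all of its children emitted.
    let mut order = vec![];
    let mut worklist: VecDeque<u32> = parent_of.keys().copied().collect();
    while let Some(node) = worklist.pop_front() {
        if children_count[&node] == 0 {
            *children_count.get_mut(&parent_of[&node]).unwrap() -= 1;
            order.push(node);
        } else {
            worklist.push_back(node);
        }
    }
    order
}

fn main() {
    // Hypothetical loop nest: loops 1 and 2 are nested inside loop 0, which hangs off the root (99).
    let parent_of = HashMap::from([(0u32, 99u32), (1, 0), (2, 0)]);
    // Prints the inner loops 1 and 2 (in some order) before the outer loop 0.
    println!("{:?}", bottom_up_order(&parent_of, 99));
}

Rotating unfinished nodes to the back of the queue terminates as long as the parent map really is a tree rooted at root, which mirrors the assumption LoopTree already makes.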
hercules_opt/src/forkify.rs  +13 −12

@@ -19,14 +19,15 @@ pub fn forkify(
 ) {
     // Ignore loops that are already fork-joins.
     let natural_loops = loops
-        .loops()
+        .bottom_up_loops()
+        .into_iter()
         .filter(|(k, _)| function.nodes[k.idx()].is_region());
 
     // Detect loops that have a simple loop induction variable. TODO: proper
     // affine analysis to recognize other cases of linear induction variables.
     let affine_loops: Vec<_> = natural_loops
         .into_iter()
-        .filter_map(|(header, (contents, _))| {
+        .filter_map(|(header, contents)| {
             // Get the single loop contained predecessor of the loop header.
             let header_uses = get_uses(&function.nodes[header.idx()]);
             let mut pred_loop = header_uses.as_ref().iter().filter(|id| contents[id.idx()]);
@@ -43,7 +44,7 @@ pub fn forkify(
             let (should_be_header, pred_datas) = function.nodes[phi.idx()].try_phi()?;
             let one_c_id = function.nodes[one.idx()].try_constant()?;
 
-            if should_be_header != *header || !constants[one_c_id.idx()].is_one() {
+            if should_be_header != header || !constants[one_c_id.idx()].is_one() {
                 return None;
             }
@@ -108,12 +109,12 @@ pub fn forkify(
         let header_uses: Vec<_> = header_uses.as_ref().into_iter().map(|x| *x).collect();
 
         // Get the control portions of the loop that need to be grafted.
-        let loop_pred = *header_uses
+        let loop_pred = header_uses
             .iter()
             .filter(|id| !contents[id.idx()])
             .next()
             .unwrap();
-        let loop_true_read = *header_uses
+        let loop_true_read = header_uses
             .iter()
             .filter(|id| contents[id.idx()])
             .next()
@@ -137,14 +138,14 @@ pub fn forkify(
         // Create fork and join nodes.
         let fork = Node::Fork {
-            control: loop_pred,
+            control: *loop_pred,
             factor: dc_id,
         };
         let fork_id = NodeID::new(function.nodes.len());
         function.nodes.push(fork);
         let join = Node::Join {
-            control: if *header == get_uses(&function.nodes[loop_end.idx()]).as_ref()[0] {
+            control: if header == get_uses(&function.nodes[loop_end.idx()]).as_ref()[0] {
                 fork_id
             } else {
                 function.nodes[loop_end.idx()].try_if().unwrap().0
@@ -158,7 +159,7 @@ pub fn forkify(
         // Convert reducing phi nodes to reduce nodes.
         let reduction_phis: Vec<_> = def_use
-            .get_users(*header)
+            .get_users(header)
            .iter()
            .filter(|id| **id != idx_phi && function.nodes[id.idx()].is_phi())
            .collect();
@@ -172,7 +173,7 @@ pub fn forkify(
                 .1
                 .iter(),
         )
-        .filter(|(c, _)| **c == loop_pred)
+        .filter(|(c, _)| **c == *loop_pred)
         .next()
         .unwrap()
         .1;
@@ -186,7 +187,7 @@ pub fn forkify(
                 .1
                 .iter(),
         )
-        .filter(|(c, _)| **c == loop_true_read)
+        .filter(|(c, _)| **c == *loop_true_read)
         .next()
         .unwrap()
         .1;
@@ -224,8 +225,8 @@ pub fn forkify(
         function.nodes[idx_phi.idx()] = Node::Start;
 
         // Delete old loop control nodes;
-        for user in def_use.get_users(*header) {
-            get_uses_mut(&mut function.nodes[user.idx()]).map(*header, fork_id);
+        for user in def_use.get_users(header) {
+            get_uses_mut(&mut function.nodes[user.idx()]).map(header, fork_id);
         }
         function.nodes[header.idx()] = Node::Start;
         function.nodes[loop_end.idx()] = Node::Start;
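A note on the deref churn in this file: LoopTree::loops() is a hash_map::Iter, which yields (&NodeID, &(BitVec<u8, Lsb0>, NodeID)), while bottom_up_loops() returns a Vec<(NodeID, &BitVec<u8, Lsb0>)> whose into_iter() yields the header by value. That is why the *header derefs disappear above, and why loop_pred and loop_true_read, now borrowed out of the local header_uses vector, pick up derefs at their use sites instead. The sketch below reproduces the two item shapes with plain u32 and Vec<bool> standing in for NodeID and BitVec; it is illustrative only, not Hercules code.

use std::collections::HashMap;

fn main() {
    // Hypothetical loop map: header 7 maps to (loop contents, parent header 0).
    let loops: HashMap<u32, (Vec<bool>, u32)> = HashMap::from([(7, (vec![true], 0))]);

    // Old shape: iterating the HashMap directly yields (&K, &V), so the
    // header arrives behind a reference and must be dereferenced.
    for (header, (contents, _parent)) in loops.iter() {
        let owned: u32 = *header; // deref needed, as in the old `*header`
        assert_eq!(owned, 7);
        assert!(contents[0]);
    }

    // New shape: a Vec<(K, &Contents)> (what bottom_up_loops returns, modulo
    // the real types) yields the header by value when consumed with into_iter().
    let bottom_up: Vec<(u32, &Vec<bool>)> = loops.iter().map(|(k, v)| (*k, &v.0)).collect();
    for (header, contents) in bottom_up.into_iter() {
        let owned: u32 = header; // no deref needed anymore
        assert_eq!(owned, 7);
        assert!(contents[0]);
    }
}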