Your first method appears to be much slower than your second method (by a factor of about 3000 on my platform). That is expected: list_to_binary/1 allocates a new binary and copies the whole accumulated data on every call, so the first method does a quadratic amount of copying overall, while the second just prepends each chunk to a list (constant time per chunk) and copies everything exactly once at the end. Here is the benchmark I used:
-module(test).
-export([test/0, performance_test/4]).

-define(ITERATIONS, 100000).
-define(NEW_DATA, <<1, 2, 3, 4, 5, 6, 7, 8, 9, 10>>).

%% Method 1: rebuild the binary on every call.
accumulate_1(AccumulatedData, NewData) ->
    list_to_binary([AccumulatedData, NewData]).

extract_1(AccumulatedData) ->
    AccumulatedData.

%% Method 2: prepend to a list, convert to a binary once at the end.
accumulate_2(AccumulatedData, NewData) ->
    [NewData | AccumulatedData].

extract_2(AccumulatedData) ->
    list_to_binary(lists:reverse(AccumulatedData)).

%% Runs AccumulateFun ?ITERATIONS times, then ExtractFun once, and reports
%% the elapsed time in microseconds.
performance_test(AccumulateFun, ExtractFun) ->
    {Time, _Result} = timer:tc(test, performance_test,
                               [AccumulateFun, ExtractFun, [], ?ITERATIONS]),
    io:format("Test run: ~p microseconds~n", [Time]).

performance_test(_AccumulateFun, ExtractFun, AccumulatedData, _MoreIterations = 0) ->
    ExtractFun(AccumulatedData);
performance_test(AccumulateFun, ExtractFun, AccumulatedData, MoreIterations) ->
    NewAccumulatedData = AccumulateFun(AccumulatedData, ?NEW_DATA),
    performance_test(AccumulateFun, ExtractFun, NewAccumulatedData, MoreIterations - 1).

test() ->
    performance_test(fun accumulate_1/2, fun extract_1/1),
    performance_test(fun accumulate_2/2, fun extract_2/1),
    ok.
Output:
7> test:test().
Test run: 57204314 microseconds
Test run: 18996 microseconds
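For reference, this is how I ran it (a minimal sketch; it assumes the module above is saved as test.erl in the current directory and that you are in an Erlang shell):

1> c(test).      % compile test.erl from the current directory
2> test:test().  % prints one "Test run: ..." line per method

Your absolute timings will differ, but the relative gap between the two methods should stay large.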