Performance, resilience and data integrity are a priority in microservice development. To make your life easier, we implemented a benchmarking framework to keep critical non-functional characteristics under constant control. In a similar vein to the Pip.Services Toolkit, the framework has symmetrical implementations across programming languages to make it easier to switch between them.
Example of use:
Create your own benchmark in file SampleBenchmark.ts
import { Benchmark } from 'pip-benchmark-node';
import { RandomBoolean } from 'pip-services3-commons-node';
/**
 * Sample benchmark that demonstrates the framework's hooks: it reads a
 * parameter during setup, and on each iteration randomly sends a message
 * or reports an error before pausing.
 */
export class SampleBenchmark extends Benchmark {
    private _greeting: string;

    public constructor() {
        super("SampleBenchmark", "Measures performance of updating");
    }

    /**
     * Called once before measurements start: caches the "Greeting"
     * parameter from the benchmark context.
     * @param callback invoked when setup completes (null on success).
     */
    public setUp(callback: (err: any) => void): void {
        this._greeting = this.context.parameters.Greeting.getAsString();
        callback(null);
    }

    /**
     * Executes a single benchmark iteration.
     * @param callback invoked when the iteration completes (null on success).
     */
    public execute(callback: (err: any) => void): void {
        // Randomly generate message or errors
        if (RandomBoolean.chance(1, 10))
            this.context.sendMessage(this._greeting);
        else if (RandomBoolean.chance(1, 10))
            this.context.reportError("Something bad happend...");

        // Just wait and do nothing.
        // NOTE: sleep.sleep() takes SECONDS, so the original sleep.sleep(5000)
        // blocked for ~83 minutes rather than the 5 seconds the comment promised.
        let sleep = require('sleep');
        sleep.sleep(5); // sleep for 5 seconds
        callback(null);
    }
}
Now create a suite containing SampleBenchmark. See the code of SampleBenchmarkSuite.ts below:
let sleep = require('sleep');
import { BenchmarkSuite } from 'pip-benchmark-node';
import { SampleBenchmark } from './SampleBenchmark'
/**
 * Sample suite that groups the sample benchmarks together and declares
 * the parameters they read from the benchmark context.
 */
export class SampleBenchmarkSuite extends BenchmarkSuite {
    public constructor() {
        super("Samples", "Provides sample benchmarks");

        // Declared parameters get defaults here and may be overridden at run time.
        this.createParameter("Greeting", "Greeting message", "Hello world!");
        this.addBenchmark(new SampleBenchmark());
    }
}
Create run.ts:
let process = require('process');
import { BenchmarkBuilder } from 'pip-benchmark-node';
import { SampleBenchmarkSuite } from './SampleBenchmarkSuite';
// Assemble a benchmark runner with the sample suite and all of its
// benchmarks enabled, running for 15 seconds.
let benchmark = new BenchmarkBuilder()
    .addSuite(new SampleBenchmarkSuite())
    .forDuration(15)
    .forceContinue(true)
    .withAllBenchmarks()
    .create();

// run() is asynchronous: the report must be generated inside the completion
// callback — generating it right after the call (as the original code did)
// produces an empty report because no measurements have been taken yet.
benchmark.run((err: any) => {
    if (err) {
        console.error(err);
        return;
    }
    let report = benchmark.report.generate();
    console.log(report);
});
Compile and run benchmark:
tsc
node ./bin/run.js
Example of use:
Create your own benchmark in file SampleBenchmark.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading;
using PipBenchmark.Utilities.Random;
namespace PipBenchmark.Sample.NetCore20
{
/// <summary>
/// Sample benchmark that demonstrates the framework's hooks: it reads a
/// parameter during setup, and on each iteration randomly sends a message
/// or reports an error before pausing.
/// </summary>
public class SampleBenchmark : Benchmark
{
    private string _greeting;

    public SampleBenchmark()
        : base("Sample", "Sample benchmark that does nothing")
    {
    }

    /// <summary>Caches the "Greeting" parameter before measurements start.</summary>
    public override void SetUp()
    {
        _greeting = Context.Parameters["Greeting"].AsString;
    }

    /// <summary>Executes a single benchmark iteration.</summary>
    public override void Execute()
    {
        // Randomly generate message or errors
        if (RandomBoolean.Chance(1, 10))
        {
            Context.SendMessage(_greeting);
        }
        else if (RandomBoolean.Chance(1, 10))
        {
            Context.ReportError("Something bad happend...");
        }

        // Pause for half a second between iterations.
        Thread.Sleep(500);
    }
}
}
Now create a suite containing SampleBenchmark. See the code of SampleBenchmarkSuite.cs below:
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading;
namespace PipBenchmark.Sample.NetCore20
{
/// <summary>
/// Sample suite that groups the sample benchmarks together and declares
/// the parameters they read from the benchmark context.
/// </summary>
public class SampleBenchmarkSuite : BenchmarkSuite
{
    public SampleBenchmarkSuite()
        : base("Samples", "Provides sample benchmarks")
    {
        // Declared parameters get defaults here and may be overridden at run time.
        CreateParameter("Greeting", "Greeting message", "Hello world!");
        AddBenchmark(new SampleBenchmark());
    }
}
}
Create Program.cs and run it:
using PipBenchmark.Runner;
using PipBenchmark.Runner.Config;
using PipBenchmark.Runner.Console;
using PipBenchmark.Sample.NetCore20;
using System;
using PipBenchmark.Console;
using PipBenchmark.Utilities;
namespace ConsoleApp1
{
/// <summary>Entry point that configures and runs the sample benchmarks.</summary>
class Program
{
    static void Main(string[] args)
    {
        // Configure a single-threaded console runner with every sample
        // benchmark enabled, measuring for 15 seconds.
        var benchmark = new ConsoleBenchmarkBuilder()
            .AddSuite(new SampleBenchmarkSuite())
            .InThreads(1)
            .ForDuration(15)
            .ForceContinue(true)
            .WithAllBenchmarks()
            .Create();

        // Run() blocks until the measurement completes, so the report
        // can be generated immediately afterwards.
        benchmark.Run();

        var report = benchmark.Report.Generate();
        Console.Out.WriteLine();
        Console.Out.Write(report);
    }
}
}
Example of use:
Python
Example of use:
Java
Example of use:
main.go
package main
import (
"errors"
"time"
bench "github.com/pip-benchmark/pip-benchmark-go/benchmark"
benchconsole "github.com/pip-benchmark/pip-benchmark-go/console"
rnd "github.com/pip-services3-go/pip-services3-commons-go/random"
)
// SampleBenchmark is a sample benchmark that, on each iteration, randomly
// sends a greeting message or reports an error to the benchmark context.
type SampleBenchmark struct {
	*bench.Benchmark
	// greeting is cached from the "Greeting" context parameter in SetUp.
	greeting string
}
func NewSampleBenchmark() *SampleBenchmark {
c := SampleBenchmark{}
c.Benchmark = bench.NewBenchmark("SampleBenchmark", "Measures performance of updating", "Type") c.Benchmark.IExecutable = &c
c.greeting = "test"
return &c
}
// SetUp runs once before measurements start; it caches the "Greeting"
// parameter from the benchmark context.
func (c *SampleBenchmark) SetUp() error {
	params := c.Context.GetParameters()
	c.greeting = params["Greeting"].GetAsString()
	return nil
}
// TearDown runs once after the benchmark completes; no cleanup is needed.
func (c *SampleBenchmark) TearDown() error {
	return nil
}
// Execute runs a single benchmark iteration: with a 1-in-10 chance it sends
// the greeting message, otherwise with another 1-in-10 chance it reports an
// error, then pauses briefly before returning.
func (c *SampleBenchmark) Execute() error {
	// Randomly generate message or errors.
	// (Idiomatic Go: boolean results are used directly, not compared to true.)
	if rnd.RandomBoolean.Chance(1, 10) {
		c.Context.SendMessage(c.greeting)
	} else if rnd.RandomBoolean.Chance(1, 10) {
		c.Context.ReportError(errors.New("Something bad happend..."))
	}
	// Just wait and do nothing
	time.Sleep(10 * time.Millisecond)
	return nil
}
// SampleBenchmarkSuite groups the sample benchmarks together and declares
// the parameters they read from the benchmark context.
type SampleBenchmarkSuite struct {
	*bench.BenchmarkSuite
}
func NewSampleBenchmarkSuite() *SampleBenchmarkSuite {
c := SampleBenchmarkSuite{}
c.BenchmarkSuite = bench.NewBenchmarkSuite("Samples", "Provides sample benchmarks") c.CreateParameter("Greeting", "Greeting message", "Hello world!") c.AddBenchmark(NewSampleBenchmark().Benchmark)
return &c
}
func main() {
var benchmark = benchconsole.NewConsoleBenchmarkBuilder() benchmark.AddSuite(NewSampleBenchmarkSuite().BenchmarkSuite).
ForDuration(5).
ForceContinue(true).
WithAllBenchmarks()
runner := benchmark.Create() runner.Parameters().Set(map[string]string{"General.Benchmarking.MeasurementType": "Nominal"}) runner.Parameters().Set(map[string]string{"General.Benchmarking.ExecutionType": "Sequential"})
runner.Run(func(err error) {
if err != nil {
print(err.Error())
}
})
print(runner.Report().Generate())
}
Run benchmark:
go run ./main.go