Wednesday, 30 January 2019

MongoDB Replication Setup


We will run three mongod instances in order to set up replication in MongoDB. Start each one with its own port and data directory:

C:\Program Files\MongoDB\Server\4.0\bin>mongod --smallfiles --oplogSize 50 --replSet test --port 27017 --dbpath C:\data\db

C:\Program Files\MongoDB\Server\4.0\bin>mongod --smallfiles --oplogSize 50 --replSet test --port 27018 --dbpath D:\data\db

C:\Program Files\MongoDB\Server\4.0\bin>mongod --smallfiles --oplogSize 50 --replSet test --port 27019 --dbpath E:\data\db







C:\>cd "\Program Files\MongoDB\Server\4.0\bin"


C:\Program Files\MongoDB\Server\4.0\bin>mongo
MongoDB shell version v4.0.2
connecting to: mongodb://127.0.0.1:27017
MongoDB server version: 4.0.2
Server has startup warnings:
2019-01-24T10:26:02.197+0530 I CONTROL  [initandlisten]
2019-01-24T10:26:02.197+0530 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten]
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] ** WARNING: This server is bound to localhost.
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          Remote systems will be unable to connect to this server.
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          Start the server with --bind_ip <address> to specify which IP
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          addresses it should serve responses from, or with --bind_ip_all to
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          bind to all interfaces. If this behavior is desired, start the
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten] **          server with --bind_ip 127.0.0.1 to disable this warning.
2019-01-24T10:26:02.198+0530 I CONTROL  [initandlisten]

C:\Program Files\MongoDB\Server\4.0\bin>mongo --host 127.0.0.1:27018
MongoDB shell version v4.0.2
connecting to: mongodb://127.0.0.1:27018/
MongoDB server version: 4.0.2
Server has startup warnings:
2019-01-24T10:39:24.775+0530 I CONTROL  [initandlisten]
2019-01-24T10:39:24.775+0530 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-01-24T10:39:24.775+0530 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-01-24T10:39:24.775+0530 I CONTROL  [initandlisten]
2019-01-24T10:39:24.776+0530 I CONTROL  [initandlisten] ** WARNING: This server is bound to localhost.
2019-01-24T10:39:24.776+0530 I CONTROL  [initandlisten] **          Remote systems will be unable to connect to this server.
2019-01-24T10:39:24.776+0530 I CONTROL  [initandlisten] **          Start the server with --bind_ip <address> to specify which IP
2019-01-24T10:39:24.777+0530 I CONTROL  [initandlisten] **          addresses it should serve responses from, or with --bind_ip_all to
2019-01-24T10:39:24.777+0530 I CONTROL  [initandlisten] **          bind to all interfaces. If this behavior is desired, start the
2019-01-24T10:39:24.777+0530 I CONTROL  [initandlisten] **          server with --bind_ip 127.0.0.1 to disable this warning.
2019-01-24T10:39:24.778+0530 I CONTROL  [initandlisten]
MongoDB Enterprise test:SECONDARY>


C:\Program Files\MongoDB\Server\4.0\bin>mongo --host 127.0.0.1:27019
MongoDB shell version v4.0.2
connecting to: mongodb://127.0.0.1:27019/
MongoDB server version: 4.0.2
Server has startup warnings:
2019-01-30T12:06:55.398+0530 I CONTROL  [initandlisten]
2019-01-30T12:06:55.398+0530 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-01-30T12:06:55.398+0530 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-01-30T12:06:55.399+0530 I CONTROL  [initandlisten]
2019-01-30T12:06:55.399+0530 I CONTROL  [initandlisten] ** WARNING: This server is bound to localhost.
2019-01-30T12:06:55.399+0530 I CONTROL  [initandlisten] **          Remote systems will be unable to connect to this server.
2019-01-30T12:06:55.400+0530 I CONTROL  [initandlisten] **          Start the server with --bind_ip <address> to specify which IP
2019-01-30T12:06:55.400+0530 I CONTROL  [initandlisten] **          addresses it should serve responses from, or with --bind_ip_all to
2019-01-30T12:06:55.400+0530 I CONTROL  [initandlisten] **          bind to all interfaces. If this behavior is desired, start the
2019-01-30T12:06:55.400+0530 I CONTROL  [initandlisten] **          server with --bind_ip 127.0.0.1 to disable this warning.
2019-01-30T12:06:55.401+0530 I CONTROL  [initandlisten]
MongoDB Enterprise test:SECONDARY>



MongoDB Enterprise test:PRIMARY> rs.initiate();
{
        "operationTime" : Timestamp(1548307074, 1),
        "ok" : 0,
        "errmsg" : "already initialized",
        "code" : 23,
        "codeName" : "AlreadyInitialized",
        "$clusterTime" : {
                "clusterTime" : Timestamp(1548307074, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}
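
The AlreadyInitialized error above simply means rs.initiate() had already been run on this node earlier. As a sketch, on a fresh node you could also initiate the set with an explicit configuration instead of adding members one by one (hosts and ports as used above; the _id must match the --replSet name):

rs.initiate({
    _id: "test",
    members: [
        { _id: 0, host: "localhost:27017" },
        { _id: 1, host: "localhost:27018" },
        { _id: 2, host: "localhost:27019" }
    ]
});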


MongoDB Enterprise test:PRIMARY> rs.add(127.0.0.1:27018);
2019-01-24T10:48:31.528+0530 E QUERY    [js] SyntaxError: missing ) after argument list @(shell):1:12
MongoDB Enterprise test:PRIMARY> rs.add("127.0.0.1:27018");
{
        "ok" : 1,
        "operationTime" : Timestamp(1548307139, 1),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1548307139, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}

MongoDB Enterprise test:PRIMARY> rs.add("127.0.0.1:27019");

MongoDB Enterprise test:PRIMARY> rs.conf();
{
        "_id" : "test",
        "version" : 2,
        "protocolVersion" : NumberLong(1),
        "writeConcernMajorityJournalDefault" : true,
        "members" : [
                {
                        "_id" : 0,
                        "host" : "localhost:27017",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {

                        },
                        "slaveDelay" : NumberLong(0),
                        "votes" : 1
                },
                {
                        "_id" : 1,
                        "host" : "127.0.0.1:27018",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {

                        },
                        "slaveDelay" : NumberLong(0),
                        "votes" : 1
                }
        ],
        "settings" : {
                "chainingAllowed" : true,
                "heartbeatIntervalMillis" : 2000,
                "heartbeatTimeoutSecs" : 10,
                "electionTimeoutMillis" : 10000,
                "catchUpTimeoutMillis" : -1,
                "catchUpTakeoverDelayMillis" : 30000,
                "getLastErrorModes" : {

                },
                "getLastErrorDefaults" : {
                        "w" : 1,
                        "wtimeout" : 0
                },
                "replicaSetId" : ObjectId("5c416b3d712e9a0ff1ac72b7")
        }
}


MongoDB Enterprise test:PRIMARY> rs.status();
{
        "set" : "test",
        "date" : ISODate("2019-01-24T05:20:36.793Z"),
        "myState" : 1,
        "term" : NumberLong(7),
        "syncingTo" : "",
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "optimes" : {
                "lastCommittedOpTime" : {
                        "ts" : Timestamp(1548307234, 1),
                        "t" : NumberLong(7)
                },
                "readConcernMajorityOpTime" : {
                        "ts" : Timestamp(1548307234, 1),
                        "t" : NumberLong(7)
                },
                "appliedOpTime" : {
                        "ts" : Timestamp(1548307234, 1),
                        "t" : NumberLong(7)
                },
                "durableOpTime" : {
                        "ts" : Timestamp(1548307234, 1),
                        "t" : NumberLong(7)
                }
        },
        "lastStableCheckpointTimestamp" : Timestamp(1548307194, 1),
        "members" : [
                {
                        "_id" : 0,
                        "name" : "localhost:27017",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        "uptime" : 1475,
                        "optime" : {
                                "ts" : Timestamp(1548307234, 1),
                                "t" : NumberLong(7)
                        },
                        "optimeDate" : ISODate("2019-01-24T05:20:34Z"),
                        "syncingTo" : "",
                        "syncSourceHost" : "",
                        "syncSourceId" : -1,
                        "infoMessage" : "",
                        "electionTime" : Timestamp(1548305762, 1),
                        "electionDate" : ISODate("2019-01-24T04:56:02Z"),
                        "configVersion" : 2,
                        "self" : true,
                        "lastHeartbeatMessage" : ""
                },
                {
                        "_id" : 1,
                        "name" : "127.0.0.1:27018",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
                        "uptime" : 97,
                        "optime" : {
                                "ts" : Timestamp(1548307234, 1),
                                "t" : NumberLong(7)
                        },
                        "optimeDurable" : {
                                "ts" : Timestamp(1548307234, 1),
                                "t" : NumberLong(7)
                        },
                        "optimeDate" : ISODate("2019-01-24T05:20:34Z"),
                        "optimeDurableDate" : ISODate("2019-01-24T05:20:34Z"),
                        "lastHeartbeat" : ISODate("2019-01-24T05:20:35.574Z"),
                        "lastHeartbeatRecv" : ISODate("2019-01-24T05:20:36.616Z"),
                        "pingMs" : NumberLong(0),
                        "lastHeartbeatMessage" : "",
                        "syncingTo" : "localhost:27017",
                        "syncSourceHost" : "localhost:27017",
                        "syncSourceId" : 0,
                        "infoMessage" : "",
                        "configVersion" : 2
                }
        ],
        "ok" : 1,
        "operationTime" : Timestamp(1548307234, 1),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1548307234, 1),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        }
}


MongoDB Enterprise test:PRIMARY>

MongoDB Enterprise test:PRIMARY> use exampleDB
switched to db exampleDB
MongoDB Enterprise test:PRIMARY> for (var i = 0; i <= 10; i++) db.exampleCollection.insert( { x : i } )
WriteResult({ "nInserted" : 1 })
MongoDB Enterprise test:PRIMARY> db.exampleCollection.find().pretty();
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f5f"), "x" : 0 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f60"), "x" : 1 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f61"), "x" : 2 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f62"), "x" : 3 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f63"), "x" : 4 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f64"), "x" : 5 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f65"), "x" : 6 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f66"), "x" : 7 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f67"), "x" : 8 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f68"), "x" : 9 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f69"), "x" : 10 }
MongoDB Enterprise test:PRIMARY>



Now go to the secondary and check if the data is replicated.
MongoDB Enterprise test:SECONDARY> db.getMongo().setSlaveOk()



MongoDB Enterprise test:SECONDARY> db.exampleCollection.find().pretty();
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f5f"), "x" : 0 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f61"), "x" : 2 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f63"), "x" : 4 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f65"), "x" : 6 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f62"), "x" : 3 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f67"), "x" : 8 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f69"), "x" : 10 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f64"), "x" : 5 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f68"), "x" : 9 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f60"), "x" : 1 }
{ "_id" : ObjectId("5c494c1fa8a110e1bca27f66"), "x" : 7 }
MongoDB Enterprise test:SECONDARY>

Check on the other secondary as well.


To test fail-over, let us kill the primary now and check rs.status() on one of the secondaries.
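
A quick way to see each member's state from the shell on a surviving node (using the same name and stateStr fields shown in the rs.status() output above):

rs.status().members.forEach(function (m) {
    print(m.name + " : " + m.stateStr);
});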


It shows that the earlier primary is no longer reachable and that the instance running on port 27018 has become the new primary.

Let us start the instance on port 27017 again; it will rejoin the replica set as a secondary.
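
Use the same command that started it originally:

C:\Program Files\MongoDB\Server\4.0\bin>mongod --smallfiles --oplogSize 50 --replSet test --port 27017 --dbpath C:\data\db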





Monday, 21 January 2019

Install MongoDB 4.0 on Windows 8

1.    Now go to https://www.mongodb.com/download-center
2.    Click Download (msi) and run it.
3.    Click Next.






 4.    Accept the terms.




5. Choose Complete (recommended for most users).


6. Select Install MongoDB as a Service.





7.  To install Compass, select Install MongoDB Compass (Default).



8. Click Install.









Run MongoDB Community Edition from the Command Interpreter

Create the database directory.

From the command line:
cd D:\
md "\MongoData\db"

Start your MongoDB database.
To start MongoDB, run mongod.exe and point the server at the database directory you just created:

"C:\Program Files\MongoDB\Server\4.0\bin\mongod.exe" --dbpath="D:\MongoData\db"



If the MongoDB database server is running correctly, the Command Interpreter displays a message that it is waiting for connections on the default port 27017.

Connect to MongoDB.

"C:\Program Files\MongoDB\Server\4.0\bin\mongo.exe"




Monday, 31 December 2018

Achieving Data Integrity and Incrementally Updating Data on Elasticsearch Using Logstash

In order to avoid duplication issues in Elasticsearch, you need to set a unique id for the documents you index.

Modify logstashMSSQL_Test.conf by adding "document_id" => "%{studentid}" to the output section, like below.

output {
  stdout { codec => json_lines }
  elasticsearch {
    "hosts" => "localhost:9200"
    "index" => "test-migrate"
    "document_type" => "data"
    "document_id" => "%{studentid}"
  }
}


Schedule the same migration if the data is continuously being updated on the MSSQL Server side; the schedule below runs the query every minute.
Add the settings below to make the update incremental, so that only rows with studentid greater than the last tracked value are fetched:

schedule => "* * * * *"
statement => "SELECT * FROM Student where studentid > :sql_last_value"
use_column_value => true
tracking_column => "studentid"

input {
  jdbc {
    jdbc_connection_string => "jdbc:sqlserver://localhost:1433;databaseName=test"
    # The user we wish to execute our statement as
    jdbc_user => "sa"
    jdbc_password => "sasa"
    # The path to our downloaded jdbc driver
    jdbc_driver_library => "C:\Users\abhijitb\.m2\repository\com\microsoft\sqlserver\mssql-jdbc\6.2.2.jre8\mssql-jdbc-6.2.2.jre8.jar"
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    # our query
    #clean_run => true
    schedule => "* * * * *"
    statement => "SELECT * FROM Student where studentid > :sql_last_value"
    use_column_value => true
    tracking_column => "studentid"
  }
}

output {
  #stdout { codec => json_lines }
  elasticsearch {
    "hosts" => "localhost:9200"
    "index" => "student"
    "document_type" => "data"
    "document_id" => "%{studentid}"
  }
}
 
Add the data to Table Student one by one.

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(8,'David','Bruce', 32, 'M', 'Paris', 'FRANCE', GETDATE());

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(9,'Steve','Warne', 30, 'M', 'Paris', 'FRANCE', GETDATE()-200);

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(10,'Ajit','Patil', 32, 'M', 'Pune', 'India', GETDATE()-100);

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(11,'Steve','Scot', 32, 'M', 'Paris', 'FRANCE', GETDATE()-50);


Once you have updated the file, go to the path C:\logstash-6.5.4\logstash-6.5.4\bin on the command prompt and run the command "logstash -f logstashMSSQL_Test.conf":

C:\logstash-6.5.4\logstash-6.5.4\bin>logstash -f logstashMSSQL_Test.conf



To verify the data on the Elasticsearch side, open the link below in a browser.
http://localhost:9200/student/_search?pretty=true


Check the data in descending order. You can verify when each record was added to Elasticsearch by checking its timestamp.
http://localhost:9200/student/_search?q=*&sort=studentid:desc&pretty=true



To check more properties visit https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html

Example of migrating MSSQL data to Elasticsearch using Logstash.

Example: MSSQL Server database + Logstash + Elasticsearch

Step 1 :

I assume the MSSQL Server database is already installed.

Let us create a table and insert some data into it.

Use Test;

create table Student (StudentId int, FirstName varchar(255), LastName varchar(255), Age int, Sex char(2), City varchar(255), Country varchar(255), EnrolledDate datetime);

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(1,'Abhijit','Bashetti',33, 'M', 'Pune', 'India', '2016-05-23 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(2,'Mark','Brown',31, 'M', 'Mumbai', 'India', '2017-05-23 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(3,'Michell','Oak',31, 'F', 'NewYork', 'US', '2017-08-25 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(4,'Peter','Samprass', 30, 'M', 'Sydeny', 'Australlia', '2017-09-30 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(5,'Andre','Russel', 30, 'M', 'London', 'UK', '2017-12-01 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(6,'Kevin','Peterson', 32, 'M', 'London', 'UK', '2017-12-21 16:12:03');

INSERT INTO Student (StudentId, FirstName, LastName, Age, Sex, City, Country, EnrolledDate) VALUES
(7,'Kevin','Burake', 32, 'M', 'London', 'UK', '2017-12-21 16:12:03');

select * from Student;



Step 2 :

Now install Elasticsearch by downloading it from https://www.elastic.co/downloads/elasticsearch

Go to the installation path, for example "C:\Program Files\Elastic\Elasticsearch\6.5.3\bin", and start Elasticsearch.



Step 3 :

Now download Logstash from https://www.elastic.co/downloads/logstash

Go to the path C:\logstash-6.5.4\logstash-6.5.4\config

Copy logstash-sample.conf as a starting point for the MSSQL database, rename the copy to logstashMSSQL_Test.conf, and add the properties below.

input {
  jdbc {
    # the full url string for the mssql server
    jdbc_connection_string => "jdbc:sqlserver://localhost:1433;databaseName=test"
    # The username and password to execute our statement as
    jdbc_user => "sa"
    jdbc_password => "sasa"
    # The path to our downloaded mssql jdbc driver
    jdbc_driver_library => "C:\Users\abhijitb\.m2\repository\com\microsoft\sqlserver\mssql-jdbc\6.2.2.jre8\mssql-jdbc-6.2.2.jre8.jar"
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    # The required query
    statement => "select * from Student"
  }
}

output {
  #stdout { codec => json_lines }
  elasticsearch {
    "hosts" => "localhost:9200"
    "index" => "student"
    "document_type" => "data"
  }
}



Go to the path C:\logstash-6.5.4\logstash-6.5.4\bin on the command prompt and run the command "logstash -f logstashMSSQL_Test.conf":

C:\logstash-6.5.4\logstash-6.5.4\bin>logstash -f logstashMSSQL_Test.conf



Step 4 :

It is time to verify that the data has been migrated to Elasticsearch.

If you have Git for Windows, go to the path "C:\Program Files\Git\mingw64\bin"
and execute the command curl -XPOST "localhost:9200/student/_search?pretty=true"
C:\Program Files\Git\mingw64\bin>curl -XPOST "localhost:9200/student/_search?pretty=true"



Or open http://localhost:9200/student/_search?pretty=true in the browser.




Sunday, 30 December 2018

Indexing and searching in Elasticsearch using curl

I have listed below the curl requests for indexing data into Elasticsearch and the curl requests for searching that data.

I am using the curl binary shipped with Git for Windows, so you may need to install Git for Windows to follow along. Once it is installed, go to the path C:\Program Files\Git\mingw64\bin.


1. To create an index named "customer"

curl -X PUT "localhost:9200/customer?pretty"

Response is :

{
  "acknowledged" : true,
  "shards_acknowledged" : true,
  "index" : "customer"
}

2. To list all the available indices

curl -X GET "localhost:9200/_cat/indices?v"

3.  To add a document

curl -X PUT "localhost:9200/customer/_doc/1?pretty" -H "Content-Type: application/json" -d "{\"name\": \"John Doe\"}"

4. To view a specific document

curl -X GET "localhost:9200/customer/_doc/1?pretty"

5. To delete an index

curl -X DELETE "localhost:9200/customer?pretty"
curl -X GET "localhost:9200/_cat/indices?v"

6. To Update a document

curl -X POST "localhost:9200/customer/_doc/1/_update?pretty" -H "Content-Type: application/json" -d "{\"doc\": { \"name\": \"Jane Doe\" }}"

7. To Delete a document

curl -X DELETE "localhost:9200/customer/_doc/1?pretty"

To verify, run the command below:
curl -X GET "localhost:9200/customer/_doc/1?pretty"

C:\Program Files\Git\mingw64\bin>curl -X GET "localhost:9200/customer/_doc/1?pretty"
{
  "_index" : "customer",
  "_type" : "_doc",
  "_id" : "1",
  "found" : false
}

8. Bulk adding documents

curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_doc/_bulk?pretty&refresh" --data-binary "@accounts.json"

Check that the index named bank has been created:
curl "localhost:9200/_cat/indices?v"

9. Search all documents in ascending order

curl -X GET "localhost:9200/bank/_search?q=*&sort=account_number:asc&pretty"

OR

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"match_all\": {} }, \"sort\": [{ \"account_number\": \"asc\" }]}"


10. Search documents with a limit on the result size

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"match_all\": {} }, \"size\": 1 }"

11. Search documents in sorted order

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": {\"match_all\": {} }, \"sort\": {\"balance\": { \"order\": \"desc\" } }}"


12. Search all documents and return only specific fields

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"match_all\": {} }, \"_source\": [\"account_number\", \"balance\"]}"

13. Search for specific documents by applying match criteria

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"match\": { \"account_number\": 20 } }}"
curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"match\": { \"address\": \"mill\" } }}"

14. Search for specific documents by applying AND criteria using bool with must

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"bool\": { \"must\": [{ \"match\": { \"address\": \"mill\" } },{ \"match\": { \"address\": \"lane\" } }]}}}"

15. Search for specific documents by applying OR criteria using bool with should

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{\"query\": { \"bool\": { \"should\": [ { \"match\": { \"address\": \"mill\" } }, { \"match\": { \"address\": \"lane\" } }]}}}"

16. Search for specific documents by applying AND and NOT criteria using bool with must and must_not

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type: application/json" -d "{ \"query\": { \"bool\": { \"must\": [{ \"match\": { \"age\": \"40\" } }], \"must_not\": [{ \"match\": { \"state\": \"ID\" } }]}}}"

17. Search documents by applying a range filter (greater than or equal, less than or equal)

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type:application/json" -d "{ \"query\": { \"bool\": { \"must\": { \"match_all\": {} }, \"filter\": { \"range\": { \"balance\": {\"gte\": 20000, \"lte\": 30000}}}}}}"

18. Search documents with aggregations

curl -X GET "localhost:9200/bank/_search?pretty" -H "Content-Type:application/json" -d "{ \"size\": 0, \"aggs\": { \"group_by_state\": { \"terms\": { \"field\": \"state.keyword\"}}}}"

Sunday, 9 December 2018

Generate the Boilerplate code using Lombok


While programming we keep doing the same thing again and again: the same code is written by hand, or we let Eclipse generate it. This code is called boilerplate code, and it is time to get rid of it. There is a library for exactly that, called Lombok. With Lombok on your project classpath you can easily get rid of all the getter and setter methods, class constructors (all-arguments and no-arguments), hashCode and equals methods, and much more, just by adding a couple of annotations to the class. For more details and other features, refer to https://projectlombok.org/features/all. The authors have created a very nice thing.
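
If you use Maven, one way to put Lombok on the classpath is a dependency along these lines (the version shown is just an example; use whichever current version you prefer):

<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <version>1.18.4</version>
    <scope>provided</scope>
</dependency>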

I will list the commonly used annotations provided by Lombok and then look at some of them with examples.

@Getter/@Setter : Generates the getter and setter methods for the class fields. All the generated methods are public by default; you can set a different access level for any field.

@NonNull : Can be used on the parameters of a method or a constructor to add a null check. It can be applied to a field as well.

@NoArgsConstructor : Generates a constructor with no parameters.

@RequiredArgsConstructor : Generates a constructor with one parameter for each field that requires special handling (final fields and fields marked @NonNull).

@AllArgsConstructor : Generates a constructor with one parameter for each field in your class.

@ToString : Provides the toString() implementation for your class, which prints the class name and the fields of the class.

@EqualsAndHashCode : Just as with toString, Lombok provides the implementation of the equals() and hashCode() methods. It considers the non-static and non-transient fields. You can also include or exclude fields explicitly with @EqualsAndHashCode.Include or @EqualsAndHashCode.Exclude.

@Data : A combo annotation provided by Lombok for the most commonly used annotations. It is the combination of @ToString, @EqualsAndHashCode, @Getter/@Setter and @RequiredArgsConstructor, so it covers all of the above in one shot.

Here is an example where I have created a Java class named Employee.java with the members empId, firstName, lastName and address. In the Eclipse Outline window you can find all the code generated by Lombok.
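
Since the screenshot is not reproduced here, this is a sketch of what such an Employee.java can look like with Lombok annotations (the exact annotations you pick may differ):

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

@Data                 // generates getters/setters, toString, equals/hashCode
@NoArgsConstructor    // generates the no-argument constructor
@AllArgsConstructor   // generates a constructor taking all four fields
public class Employee {

    private int empId;
    private String firstName;
    private String lastName;
    private String address;
}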



I have created objects of the Employee class to show how to use the implementations provided by Lombok; in this case I have used the toString and equals methods.
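
Again as a sketch based on the class above, the generated methods can be used like any hand-written ones:

public class LombokDemo {

    public static void main(String[] args) {
        Employee e1 = new Employee(1, "John", "Doe", "Pune");
        Employee e2 = new Employee(1, "John", "Doe", "Pune");

        // toString() generated by Lombok prints the class name and all fields
        System.out.println(e1);             // Employee(empId=1, firstName=John, lastName=Doe, address=Pune)

        // equals() generated by Lombok compares field values, not references
        System.out.println(e1.equals(e2));  // true

        // generated setter and getter
        e1.setAddress("Mumbai");
        System.out.println(e1.getAddress()); // Mumbai
    }
}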






Saturday, 1 December 2018

Split a large XML file into smaller XML files using XSLT


Here is a sample of the source XML.

<?xml version="1.0" encoding="UTF-8"?>
<ABC>
    <END />
    <Tables>
        <START>
            <row>
                <id>111</id>
                <name>abc</name>
                <deptId>1</deptId>
            </row>
            <row>
                <id>112</id>
                <name>abc1</name>
                <deptId>1</deptId>
            </row>
            <row>
                <id>113</id>
                <name>abc3</name>
                <deptId>1</deptId>
            </row>
            <row>
                <id>222</id>
                <name>def</name>
                <deptId>2</deptId>
            </row>
            <row>
                <id>333</id>
                <name>pqr</name>
                <deptId>2</deptId>
            </row>
            <row>
                <id>444</id>
                <name>xyz</name>
                <deptId>2</deptId>
            </row>
            <row>
                <id>555</id>
                <name>lmn</name>
                <deptId>3</deptId>
            </row>
            <row>
                <id>555</id>
                <name>lmn</name>
                <deptId>3</deptId>
            </row>
        </START>
    </Tables>
</ABC>

I have an XML file with the above structure, and I have to split it into three XML files based on the different deptId values.
In other words, the XML must be split into smaller files whenever the value of a tag changes; my element is deptId, whose value changes after some rows, and all rows with the same deptId appear in sequence.

The required output: it is convenient to use the department id as the file name.
The first XML file, named 1.xml, is:

<?xml version="1.0" encoding="UTF-16"?>
<ABC>
    <END />
    <Tables>
        <START>
            <row>
                <id>111</id>
                <name>abc</name>
                <deptId>1</deptId>
            </row>
            <row>
                <id>112</id>
                <name>abc1</name>
                <deptId>1</deptId>
            </row>
            <row>
                <id>113</id>
                <name>abc3</name>
                <deptId>1</deptId>
            </row>
        </START>
    </Tables>
</ABC>

The solution to the above problem is to use XSLT 2.0, which provides xsl:for-each-group and xsl:result-document.
Create an XSL file like the one below.

<xsl:transform xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="2.0">
  <xsl:template match="/">
    <xsl:for-each-group select="//ABC//Tables//START//row" group-adjacent="deptId">
    <xsl:variable name="file" select="concat(deptId,'.xml')"/>
      <xsl:result-document href="{$file}">
        <ABC>
         <END />
          <Tables>
           <START>
            <xsl:copy-of select="current-group()"/>
           </START>
          </Tables>
        </ABC>
      </xsl:result-document>
    </xsl:for-each-group>
  </xsl:template>
</xsl:transform>


Here is a sample piece of Java code that runs the transformation using the Saxon s9api (the built-in JDK XSLT processor will not work here, since it only supports XSLT 1.0).

package com.java.xml;

import java.io.File;

import javax.xml.transform.stream.StreamSource;

import net.sf.saxon.s9api.Processor;
import net.sf.saxon.s9api.SaxonApiException;
import net.sf.saxon.s9api.Serializer;
import net.sf.saxon.s9api.Xslt30Transformer;
import net.sf.saxon.s9api.XsltCompiler;
import net.sf.saxon.s9api.XsltExecutable;

public class SplitXml {

    public static void main(String[] args) {
        Processor proc = new Processor(false);
        XsltCompiler comp = proc.newXsltCompiler();
        XsltExecutable exp;
        try {
            // compile the stylesheet
            exp = comp.compile(new StreamSource(new File("E://test.xsl")));
            // default destination; the per-department files are created by xsl:result-document
            Serializer out = proc.newSerializer(new File("E://output.xml"));
            Xslt30Transformer trans = exp.load30();
            // apply the templates to the source document
            trans.applyTemplates(new StreamSource(new File("E://source.xml")), out);
        } catch (SaxonApiException e) {
            e.printStackTrace();
        }
    }
}
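
The code above assumes the Saxon-HE jar is on the classpath. With Maven that is a dependency along these lines (the version shown is only an example):

<dependency>
    <groupId>net.sf.saxon</groupId>
    <artifactId>Saxon-HE</artifactId>
    <version>9.8.0-14</version>
</dependency>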