yourLinux:~ # cat /tmp/ebs2.json
[
{"DeviceName":"/dev/sda1","Ebs":{"VolumeSize":100,"VolumeType":"gp2","DeleteOnTermination":true}},
{"DeviceName":"/dev/sdf","Ebs":{"VolumeSize":667,"VolumeType":"gp2","DeleteOnTermination":true}},
{"DeviceName":"/dev/sdg","Ebs":{"VolumeSize":667,"VolumeType":"gp2","DeleteOnTermination":true}},
{"DeviceName":"/dev/sdh","Ebs":{"VolumeSize":667,"VolumeType":"gp2","DeleteOnTermination":true}},
{"DeviceName":"/dev/sdi","Ebs":{"VolumeSize":667,"VolumeType":"gp2","DeleteOnTermination":true}},
{"DeviceName":"/dev/sdj","Ebs":{"VolumeSize":50,"VolumeType":"gp2","DeleteOnTermination":true}}
]
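A quick way to sanity-check the mapping file before launching (assuming Python is available on the admin host):
yourLinux:~ # python -m json.tool /tmp/ebs2.json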
yourLinux:~ # aws ec2 run-instances \
--image-id ami-e22b898c \
--count 1 \
--instance-type r4.2xlarge \
--ebs-optimized \
--private-ip-address 172.31.128.22 \
--key-name KeyPair \
--security-group-ids sg-07d8b7d9bc71e0e5d \
--subnet-id subnet-0ec93994701de0193 \
--placement AvailabilityZone=ap-northeast-2c,GroupName=myplsgrp \
--instance-initiated-shutdown-behavior stop \
--block-device-mappings file:///tmp/ebs2.json \
--tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=SAP HANA Worker}]'
{
"Instances": [
{
"Monitoring": {
"State": "disabled"
},
"PublicDnsName": "",
"StateReason": {
"Message": "pending",
"Code": "pending"
},
"State": {
"Code": 0,
"Name": "pending"
},
"EbsOptimized": true,
"LaunchTime": "2018-06-22T07:09:04.000Z",
"PrivateIpAddress": "172.31.128.22",
"ProductCodes": [],
"VpcId": "vpc-b49ab4dc",
"CpuOptions": {
"CoreCount": 4,
"ThreadsPerCore": 2
},
"StateTransitionReason": "",
"InstanceId": "i-0d4bb4677f5a80c28",
"ImageId": "ami-e22b898c",
"PrivateDnsName": "ip-172-31-128-22.ap-northeast-2.compute.internal",
"KeyName": "KeyPair",
"SecurityGroups": [
{
"GroupName": "SecGrp",
"GroupId": "sg-07d8b7d9bc71e0e5d"
}
],
... <remaining run-instances output truncated>
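Memo.
For scripting the later steps, the instance ID can also be looked up by its private IP; a sketch using the CLI query syntax:
yourLinux:~ # aws ec2 describe-instances \
--filters Name=private-ip-address,Values=172.31.128.22 \
--query 'Reservations[0].Instances[0].InstanceId' --output text
i-0d4bb4677f5a80c28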
yourLinux:~ # aws ec2 describe-instance-status --instance-id i-0d4bb4677f5a80c28
{
"InstanceStatuses": [
{
"InstanceId": "i-0d4bb4677f5a80c28",
"InstanceState": {
"Code": 16,
"Name": "running"
},
"AvailabilityZone": "ap-northeast-2c",
"SystemStatus": {
"Status": "ok",
"Details": [
{
"Status": "passed",
"Name": "reachability"
}
]
},
"InstanceStatus": {
"Status": "ok",
"Details": [
{
"Status": "passed",
"Name": "reachability"
}
]
}
}
]
}
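Instead of polling describe-instance-status manually, the CLI's built-in waiter blocks until both status checks pass:
yourLinux:~ # aws ec2 wait instance-status-ok --instance-ids i-0d4bb4677f5a80c28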
yourLinux:~ # aws ec2 allocate-address
{
"PublicIp": "13.209.86.198",
"Domain": "vpc",
"AllocationId": "eipalloc-035d6f2968843e907"
}
yourLinux:~ # aws ec2 associate-address --instance-id i-0d4bb4677f5a80c28 --allocation-id eipalloc-035d6f2968843e907
{
"AssociationId": "eipassoc-00caee5e05fb50531"
}
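The association can be verified before connecting:
yourLinux:~ # aws ec2 describe-addresses --allocation-ids eipalloc-035d6f2968843e907 \
--query 'Addresses[0].[PublicIp,InstanceId]' --output text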
yourLinux:~ # ssh -i KeyPair.pem ec2-user@13.209.86.198
SUSE Linux Enterprise Server 12 SP3 x86_64 (64-bit)
As "root" (sudo or sudo -i) use the:
- zypper command for package management
- yast command for configuration management
Management and Config: https://www.suse.com/suse-in-the-cloud-basics
Documentation: https://www.suse.com/documentation/sles-12/
Forum: https://forums.suse.com/forumdisplay.php?93-SUSE-Public-Cloud
Have a lot of fun...
ec2-user@ip-172-31-128-22:~> sudo su -
ip-172-31-128-22:~ #
ip-172-31-128-22:~ # hostname imdbworker
ip-172-31-128-22:~ # echo "imdbworker" > /etc/HOSTNAME
ip-172-31-128-22:~ # cp /etc/hosts /etc/hosts.bak
ip-172-31-128-22:~ # echo "172.31.128.22 imdbworker imdbworker.local" >> /etc/hosts
ip-172-31-128-22:~ # sed -i '/preserve_hostname/ c\preserve_hostname: true' /etc/cloud/cloud.cfg
ip-172-31-128-22:~ # cp /etc/defaultdomain /etc/defaultdomain.bak
ip-172-31-128-22:~ # echo "local" >> /etc/defaultdomain
yourLinux:~ # aws ec2 stop-instances --instance-ids i-0d4bb4677f5a80c28
yourLinux:~ # # wait until the instance has fully stopped before starting it again:
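The CLI waiter can do the waiting (it polls until the state is reached):
yourLinux:~ # aws ec2 wait instance-stopped --instance-ids i-0d4bb4677f5a80c28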
yourLinux:~ # aws ec2 start-instances --instance-ids i-0d4bb4677f5a80c28
imdbworker:~ # lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
xvda 202:0 0 100G 0 disk
└─xvda1 202:1 0 100G 0 part /
xvdf 202:80 0 667G 0 disk
xvdg 202:96 0 667G 0 disk
xvdh 202:112 0 667G 0 disk
xvdi 202:128 0 667G 0 disk
xvdj 202:144 0 50G 0 disk
imdbworker:~ # pvcreate /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi
imdbworker:~ # echo "noop" > /sys/block/xvdf/queue/scheduler
imdbworker:~ # echo "noop" > /sys/block/xvdg/queue/scheduler
imdbworker:~ # echo "noop" > /sys/block/xvdh/queue/scheduler
imdbworker:~ # echo "noop" > /sys/block/xvdi/queue/scheduler
Create a volume group and carve out striped logical volumes for the HANA data and log areas.
imdbworker:~ # vgcreate vghana /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi
imdbworker:~ # lvcreate -n lvhanalog -i 4 -I 256 -L 200G vghana
imdbworker:~ # lvcreate -n lvhanadata -i 4 -I 256 -L 800G vghana
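The stripe layout can be verified before creating the filesystems (the #Str column should show 4 stripes per volume):
imdbworker:~ # lvs --segments vghana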
imdbworker:~ # mkfs.xfs /dev/mapper/vghana-lvhanalog
imdbworker:~ # mkfs.xfs /dev/mapper/vghana-lvhanadata
imdbworker:~ # mkdir /hana /hana/data /hana/log
imdbworker:~ # mkdir /hana/data/<SID> /hana/log/<SID>
imdbworker:~ # mkfs.xfs -f /dev/xvdj
imdbworker:~ # mkdir /usr/sap
Append the following entries to /etc/fstab:
/dev/xvdj /usr/sap xfs nobarrier,noatime,nodiratime,logbsize=256k 0 0
/dev/mapper/vghana-lvhanadata /hana/data xfs nobarrier,noatime,nodiratime,logbsize=256k 0 0
/dev/mapper/vghana-lvhanalog /hana/log xfs nobarrier,noatime,nodiratime,logbsize=256k 0 0
imdbmaster:~ # mount -a
imdbmaster:~ # df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 30G 8.0K 30G 1% /dev
tmpfs 30G 0 30G 0% /dev/shm
tmpfs 30G 23M 30G 1% /run
tmpfs 30G 0 30G 0% /sys/fs/cgroup
/dev/xvda1 99G 1.7G 93G 2% /
tmpfs 6.0G 0 6.0G 0% /run/user/1000
/dev/xvdj 50G 33M 50G 1% /usr/sap
/dev/mapper/vghana-lvhanadata 800G 34M 800G 1% /hana/data
/dev/mapper/vghana-lvhanalog 200G 33M 200G 1% /hana/log
imdbmaster:~ # zypper -n install nfs-kernel-server
imdbmaster:~ # chkconfig nfsserver on
imdbmaster:~ # service nfsserver start
imdbmaster:~ # service nfsserver status
Pin the NFS helper daemons to fixed ports in /etc/sysconfig/nfs, so they can be allowed in the security group:
STATD_PORT="4000"
LOCKD_TCPPORT="4001"
LOCKD_UDPPORT="4001"
MOUNTD_PORT="4002"
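With the ports pinned, the worker must be able to reach them, plus portmapper (111) and nfs (2049), in the master's security group. A sketch, assuming both nodes use sg-07d8b7d9bc71e0e5d so the rule can reference the group itself (UDP rules are analogous):
yourLinux:~ # aws ec2 authorize-security-group-ingress --group-id sg-07d8b7d9bc71e0e5d \
--protocol tcp --port 111 --source-group sg-07d8b7d9bc71e0e5d
yourLinux:~ # aws ec2 authorize-security-group-ingress --group-id sg-07d8b7d9bc71e0e5d \
--protocol tcp --port 2049 --source-group sg-07d8b7d9bc71e0e5d
yourLinux:~ # aws ec2 authorize-security-group-ingress --group-id sg-07d8b7d9bc71e0e5d \
--protocol tcp --port 4000-4002 --source-group sg-07d8b7d9bc71e0e5d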
edit /etc/exports to share the global HANA directories:
#Share global HANA shares
/hana/shared imdbworker(rw,no_root_squash,no_subtree_check)
/backup imdbworker(rw,no_root_squash,no_subtree_check)
imdbmaster:~ # exportfs -a
Memo.
The "exportfs -a" command produces the following "Function not implemented" errors when the nfsserver service has not been started yet:
exportfs: imdbworker:/backup: Function not implemented
exportfs: imdbworker:/hana/shared: Function not implemented
imdbmaster:~ # showmount -e
Export list for imdbmaster:
/backup imdbworker
/hana/shared imdbworker
Memo.
The "showmount -e" command produces the following error when the nfsserver service has not been started yet:
clnt_create: RPC: Port mapper failure - Unable to receive: errno 111 (Connection refused)
imdbworker:~ # mkdir /hana/shared /backup
edit /etc/auto.master (comment out the line with +auto.master)
#+auto.master
/- auto.direct
edit /etc/auto.direct
/hana/shared -rw,rsize=32768,wsize=32768,timeo=14,intr imdbmaster.local:/hana/shared
/backup -rw,rsize=32768,wsize=32768,timeo=14,intr imdbmaster.local:/backup
restart the autofs service.
imdbworker:~ # chkconfig autofs on
imdbworker:~ # service autofs restart
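With a direct map, the shares are mounted automatically on first access; a quick check:
imdbworker:~ # ls /hana/shared /backup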
imdbworker:~ # df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 30G 8.0K 30G 1% /dev
tmpfs 30G 0 30G 0% /dev/shm
tmpfs 30G 22M 30G 1% /run
tmpfs 30G 0 30G 0% /sys/fs/cgroup
/dev/xvda1 99G 1.7G 93G 2% /
/dev/xvdj 50G 226M 50G 1% /usr/sap
tmpfs 6.0G 0 6.0G 0% /run/user/1000
imdbmaster.local:/hana/data 800G 5.6G 795G 1% /hana/data
imdbmaster.local:/hana/log 200G 3.9G 197G 2% /hana/log
imdbmaster.local:/hana/shared 200G 9.3G 191G 5% /hana/shared
imdbmaster.local:/backup 1.2T 34M 1.2T 1% /backup
imdbmaster:~ # cd /hana/shared/<SID>/hdblcm
imdbmaster:~ # ./hdblcm --action=add_hosts \
--addhosts=imdbworker:role=worker:group=default:workergroup=default \
--password=<hana adm password> \
--sapadm_password=<sapadm password> \
--sid=<SID> \
--batch
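Whether the worker joined the landscape can be checked from the master, for example via hdbsql (the instance number 00 in the prompt below is an assumption):
imdbmaster:~ # su - <sid>adm
imdbmaster:/usr/sap/<SID>/HDB00> hdbsql -u SYSTEM -p <SYSTEM password> \
"SELECT HOST, HOST_ACTIVE, HOST_STATUS FROM M_LANDSCAPE_HOST_CONFIGURATION"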