Temporarily (current session only):
# service iptables status
# service iptables stop
Permanently (survives reboot):
# chkconfig --list
# chkconfig --list | grep iptables
# chkconfig iptables off
setup  # text-mode configuration tool (includes network settings)
/etc/sysconfig/network-scripts/ifcfg-eth0  # NIC configuration file
service network restart
ifconfig  # verify the new settings
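A minimal static-IP ifcfg-eth0 sketch (all addresses below are placeholders):
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.1.101
NETMASK=255.255.255.0
GATEWAY=192.168.1.1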
/etc/sysconfig/network  # permanent hostname setting
# hostname        # show the current hostname
# hostname hdp1   # temporarily change the hostname to hdp1
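On CentOS 6 the permanent hostname goes in /etc/sysconfig/network, e.g.:
NETWORKING=yes
HOSTNAME=hdp1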
Name resolution (via the hosts file rather than real DNS):
/etc/hosts
ping <hostname>   # verify that the name resolves
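A sketch of cluster entries in /etc/hosts (IP addresses are placeholders; hdp2 is a hypothetical extra node):
192.168.1.100   hdp0
192.168.1.101   hdp1
192.168.1.102   hdp2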
su - <username>   # switch user
sudo <command>    # requires an entry in the sudoers file, /etc/sudoers
secure shell (SSH)
Prerequisite: both hosts have the same username, so they can log in to each other without specifying a user.
~/.ssh
ssh <hostname>
# ssh-keygen -t rsa   # generates a key pair: (1) id_rsa (private) (2) id_rsa.pub (public)
# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys   # authorized_keys must have mode 600
To log in to a host without a password, append your own public key to that host's authorized_keys.
Alternatively, use: # ssh-copy-id <hostname>
# service sshd restart   # apply the configuration
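Putting it together, passwordless login from hdp0 to hdp1 would be set up roughly as:
# ssh-keygen -t rsa      # accept the defaults at every prompt
# ssh-copy-id hdp1       # appends this host's public key to hdp1's authorized_keys
# ssh hdp1               # should now log in without a password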
Remote file copy:
# scp <file> <hostname>:<directory>
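For example, pushing the JDK archive used below to another node (the target directory /usr/local/src is an assumption):
# scp jdk-7u65-linux-i586.tar.gz hdp1:/usr/local/src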
Unpack the JDK and Hadoop:
# tar -zxvf jdk-7u65-linux-i586.tar.gz
# tar -zxvf hadoop-2.4.1.tar.gz
System-wide: /etc/profile
Per-user:    ~/.bash_profile
export JAVA_HOME=.../jdk
export HADOOP_HOME=.../hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Run this so the changes take effect:
# source /etc/profile
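A concrete sketch of the /etc/profile additions, assuming the two archives above were unpacked under /usr/local (adjust the paths to your layout):
export JAVA_HOME=/usr/local/jdk1.7.0_65
export HADOOP_HOME=/usr/local/hadoop-2.4.1
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin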
hadoop/etc/hadoop:
hadoop-env.sh
core-site.xml
hdfs-site.xml
mapred-site.xml
yarn-site.xml
slaves
masters  # SecondaryNameNode node
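The slaves and masters files are plain hostname lists, one per line; a sketch (hdp2 and the choice of hdp0 for the SecondaryNameNode are assumptions):
# cat slaves
hdp1
hdp2
# cat masters
hdp0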
hadoop-env.sh
export JAVA_HOME=.../jdk
core-site.xml
fs.defaultFS=hdfs://hdp1:9000
hadoop.tmp.dir=.../hadoop/tmp
hdfs-site.xml
dfs.replication=3
mapred-site.xml
mapreduce.framework.name=yarn
yarn-site.xml
yarn.resourcemanager.hostname=hdp0
yarn.nodemanager.aux-services=mapreduce_shuffle
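Each *-site.xml uses the same <property> structure; core-site.xml, for instance (the tmp directory path is an assumption):
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hdp1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop-2.4.1/tmp</value>
  </property>
</configuration>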
After configuration, use the scp command to distribute Hadoop to every node.
# hadoop namenode -format
This creates the directory ${hadoop.tmp.dir}/dfs/name/current
containing the files:
fsimage_000...0
fsimage_000...0.md5
seen_txid
VERSION
start-dfs.sh
starting namenode
starting datanode            # on the hosts listed in the slaves file
starting secondarynamenode   # on the host listed in the masters file
# jps   # list running Java processes
start-yarn.sh
starting yarn daemons
starting resourcemanager
starting nodemanager
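On a node running every daemon, jps output would look roughly like this (PIDs are placeholders):
# jps
2481 NameNode
2602 DataNode
2745 SecondaryNameNode
2890 ResourceManager
2987 NodeManager
3100 Jps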
HDFS web UI port: 50070
YARN web UI port: 8088
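With the hostnames used above, the web UIs would be reachable at:
http://hdp1:50070   # HDFS (NameNode on hdp1)
http://hdp0:8088    # YARN (ResourceManager on hdp0)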