Commit 962f4175 authored by pengys5

merge fixed

version: '2.1'
services:
  skywalking-storage:
    image: skywalking/skywalking-storage:2.1-2017
    expose:
      - "34000"
    depends_on:
      - registry-center-server
    links:
      - registry-center-server
    environment:
      - REGISTRY_CENTER_URL=registry-center-server:2181
  skywalking-alarm:
    image: skywalking/skywalking-alarm:2.1-2017
    depends_on:
      - alarm-coordinate-zk
      - mysql-server
      - redis-server
    links:
      - alarm-coordinate-zk
      - mysql-server
      - redis-server
    environment:
      - ALARM_COORDINATE_ZK_ADDRESS=alarm-coordinate-zk:2181
      - MYSQL_SERVER=mysql-server:3306
      - REDIS_SERVER=redis-server:6379
      - WEBUI_DEPLOY_ADDRESS=192.168.1.104:8080
      - WEBUI_APPLICATION_CONTEXT=skywalking
      - ALARM_MAIL_HOST=smtp.mail.com
      - MAIL_USER_NAME=skywalking
      - MAIL_PASSWORD=skywalking
      - MAIL_SENDER_MAIL=skywalking@mail.com
  skywalking-routing:
    image: skywalking/skywalking-routing:2.1-2017
    expose:
      - "23000"
    ports:
      - "23000:23000"
    depends_on:
      - registry-center-server
    links:
      - registry-center-server
    environment:
      - REGISTRY_CENTER_URL=registry-center-server:2181
      - ALARM_REDIS_SERVER=redis-server:6379
  skywalking-webui:
    image: sky-walking-ui:1.0
    expose:
      - "8080"
    ports:
      - "8080:8080"
    depends_on:
      - mysql-server
      - registry-center-server
    links:
      - mysql-server
      - registry-center-server
      - skywalking-collector
    environment:
      - MYSQL_URL=mysql-server:3306
      - REGISTRY_CENTER_URL=registry-center-server:2181
      - COLLECTOR_SERVERS=skywalking-collector:7001
  skywalking-collector:
    image: skywalking-collector:3.0-2017
    expose:
      - "7001"
      - "1000"
    ports:
      - "7001:7001"
    depends_on:
      es-server:
        condition: service_healthy
    links:
      - es-server
    environment:
      - ES_CLUSTER_NODES=es-server:9300
      - HTTP_HOST_NAME=0.0.0.0
  mysql-server:
    image: mysql:5.6
    expose:
      - "3306"
    environment:
      MYSQL_DATABASE: test
      MYSQL_ROOT_PASSWORD: root
  registry-center-server:
    image: zookeeper:3.4.9
    expose:
      - "2181"
  redis-server:
    image: redis:3.2.6
    expose:
      - "6379"
  alarm-coordinate-zk:
    image: zookeeper:3.4.9
    expose:
      - "2181"
  es-server:
    image: elasticsearch:5.3
    command: "-Enode.name=TestNode -Enetwork.host=0.0.0.0 -Ehttp.cors.enabled=true -Ehttp.cors.allow-origin=* -Ethread_pool.bulk.queue_size=1000 -Ecluster.name=CollectorDBCluster"
    expose:
      - "9200"
      - "9300"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9200"]
      interval: 30s
      timeout: 10s
      retries: 5
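A quick way to exercise this compose file (a sketch, assuming it is saved as docker-compose.yml and that the skywalking-collector:3.0-2017 image has already been built locally, for example by the Maven docker plugin configured further down):

# Compose file format 2.1 is required for the "condition: service_healthy" syntax above.
docker-compose up -d
# The collector only starts once the es-server healthcheck passes.
docker-compose ps
# The collector HTTP port is mapped to the host by the compose file.
curl http://localhost:7001/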
@@ -182,6 +182,14 @@
                    </instrumentation>
                </configuration>
            </plugin>
            <plugin>
                <groupId>com.spotify</groupId>
                <artifactId>docker-maven-plugin</artifactId>
                <version>${docker.plugin.version}</version>
                <configuration>
                    <skipDocker>true</skipDocker>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
@echo off
setlocal
rem Collector window title and distribution layout.
set COLLECTOR_PROCESS_TITLE=Skywalking-Collector
set COLLECTOR_BASE_PATH=%~dp0%..
set COLLECTOR_RUNTIME_OPTIONS=-Xms256M -Xmx512M
set CLASSPATH=%COLLECTOR_BASE_PATH%\config;
set CLASSPATH=%COLLECTOR_BASE_PATH%\libs\*;%CLASSPATH%
rem Use the java on PATH when JAVA_HOME is not set.
if ""%JAVA_HOME%"" == """" (
set _EXECJAVA=java
) else (
set _EXECJAVA="%JAVA_HOME%"\bin\java
)
start /MIN "%COLLECTOR_PROCESS_TITLE%" %_EXECJAVA% %COLLECTOR_RUNTIME_OPTIONS% -cp "%CLASSPATH%" com.a.eye.skywalking.collector.worker.CollectorBootStartUp
echo Collector started successfully!
endlocal
#!/usr/bin/env bash
PRG="$0"
PRGDIR=`dirname "$PRG"`
[ -z "$COLLECTOR_HOME" ] && COLLECTOR_HOME=`cd "$PRGDIR/.." >/dev/null; pwd`
COLLECTOR_LOGS_DIR="${COLLECTOR_HOME}/logs"
JAVA_OPTS=" -Xms256M -Xmx512M"
if [ ! -d "${COLLECTOR_HOME}/logs" ]; then
mkdir -p "${COLLECTOR_LOGS_DIR}"
fi
# Use $JAVA_HOME/bin/java when available, otherwise fall back to java on the PATH.
_RUNJAVA="${JAVA_HOME}/bin/java"
[ -z "$JAVA_HOME" ] && _RUNJAVA=java
CLASSPATH="$COLLECTOR_HOME/config:$CLASSPATH"
for i in "$COLLECTOR_HOME"/libs/*.jar
do
CLASSPATH="$i:$CLASSPATH"
done
echo "Starting collector...."
eval exec "\"$_RUNJAVA\" ${JAVA_OPTS} -classpath $CLASSPATH com.a.eye.skywalking.collector.worker.CollectorBootStartUp \
2>${COLLECTOR_LOGS_DIR}/collector.log 1> /dev/null &"
if [ $? -eq 0 ]; then
sleep 1
echo "Collector started successfully!"
else
echo "Collector started failure!"
exit 1
fi
@echo off
setlocal
call "%~dp0"\collector-service.bat start
endlocal
#!/usr/bin/env bash
PRG="$0"
PRGDIR=`dirname "$PRG"`
EXECUTABLE=collector-service.sh
exec "$PRGDIR"/"$EXECUTABLE" start
FROM openjdk:8-jdk
ENV CLUSTER_CURRENT_HOST_NAME=127.0.0.1 \
CLUSTER_CURRENT_PORT=1000 \
CLUSTER_CURRENT_ROLES=WorkersListener \
CLUSTER_SEED_NODES=127.0.0.1:1000,127.0.0.1:1001 \
ES_CLUSTER_NAME=CollectorDBCluster \
ES_CLUSTER_NODES=127.0.0.1:9300 \
ES_INDEX_SHARDS_NUMBER=2 \
ES_INDEX_REPLICAS_NUMBER=0 \
HTTP_HOST_NAME=127.0.0.1 \
HTTP_PORT=7001 \
HTTP_CONTEXT_PATH=/
ADD skywalking-collector.tar.gz /usr/local
COPY startup.sh /usr/local/skywalking-collector/bin
COPY collector.config /usr/local/skywalking-collector/config
ADD docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh && chmod +x /usr/local/skywalking-collector/bin/startup.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/local/skywalking-collector/bin/startup.sh"]
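For a manual build outside of Maven, something like the following should work (a sketch; the image tag and the environment values are placeholders, and skywalking-collector.tar.gz, startup.sh, collector.config and docker-entrypoint.sh must already be present in the build context):

# Build the collector image from this Dockerfile.
docker build -t skywalking-collector:3.0-2017 .
# Run it against an Elasticsearch node; variables left unset keep the defaults above.
docker run -d -p 7001:7001 \
  -e HTTP_HOST_NAME=0.0.0.0 \
  -e ES_CLUSTER_NODES=my-es-host:9300 \
  skywalking-collector:3.0-2017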
cluster.current.hostname={CLUSTER_CURRENT_HOST_NAME}
cluster.current.port={CLUSTER_CURRENT_PORT}
cluster.current.roles={CLUSTER_CURRENT_ROLES}
cluster.seed_nodes={CLUSTER_SEED_NODES}
es.cluster.name={ES_CLUSTER_NAME}
es.cluster.nodes={ES_CLUSTER_NODES}
es.cluster.transport.sniffer=true
es.index.shards.number={ES_INDEX_SHARDS_NUMBER}
es.index.replicas.number={ES_INDEX_REPLICAS_NUMBER}
http.hostname={HTTP_HOST_NAME}
http.port={HTTP_PORT}
http.contextPath={HTTP_CONTEXT_PATH}
cache.analysis.size=1024
cache.persistence.size=1024
WorkerNum.Node.NodeCompAgg.Value=10
WorkerNum.Node.NodeMappingDayAgg.Value=10
WorkerNum.Node.NodeMappingHourAgg.Value=10
WorkerNum.Node.NodeMappingMinuteAgg.Value=10
WorkerNum.NodeRef.NodeRefDayAgg.Value=10
WorkerNum.NodeRef.NodeRefHourAgg.Value=10
WorkerNum.NodeRef.NodeRefMinuteAgg.Value=10
WorkerNum.NodeRef.NodeRefResSumDayAgg.Value=10
WorkerNum.NodeRef.NodeRefResSumHourAgg.Value=10
WorkerNum.NodeRef.NodeRefResSumMinuteAgg.Value=10
WorkerNum.GlobalTrace.GlobalTraceAgg.Value=10
Queue.GlobalTrace.GlobalTraceSave.Size=1024
Queue.GlobalTrace.GlobalTraceAnalysis.Size=1024
Queue.Segment.SegmentPost.Size=1024
Queue.Segment.SegmentCostSave.Size=1024
Queue.Segment.SegmentSave.Size=1024
Queue.Segment.SegmentExceptionSave.Size=1024
Queue.Node.NodeCompAnalysis.Size=1024
Queue.Node.NodeMappingDayAnalysis.Size=1024
Queue.Node.NodeMappingHourAnalysis.Size=1024
Queue.Node.NodeMappingMinuteAnalysis.Size=1024
Queue.Node.NodeCompSave.Size=1024
Queue.Node.NodeMappingDaySave.Size=1024
Queue.Node.NodeMappingHourSave.Size=1024
Queue.Node.NodeMappingMinuteSave.Size=1024
Queue.NodeRef.NodeRefDayAnalysis.Size=1024
Queue.NodeRef.NodeRefHourAnalysis.Size=1024
Queue.NodeRef.NodeRefMinuteAnalysis.Size=1024
Queue.NodeRef.NodeRefDaySave.Size=1024
Queue.NodeRef.NodeRefHourSave.Size=1024
Queue.NodeRef.NodeRefMinuteSave.Size=1024
Queue.NodeRef.NodeRefResSumDaySave.Size=1024
Queue.NodeRef.NodeRefResSumHourSave.Size=1024
Queue.NodeRef.NodeRefResSumMinuteSave.Size=1024
Queue.NodeRef.NodeRefResSumDayAnalysis.Size=1024
Queue.NodeRef.NodeRefResSumHourAnalysis.Size=1024
Queue.NodeRef.NodeRefResSumMinuteAnalysis.Size=1024
#!/usr/bin/env bash
echo "replace CLUSTER_CURRENT_HOST_NAME with $CLUSTER_CURRENT_HOST_NAME"
eval sed -i -e 's/\{CLUSTER_CURRENT_HOST_NAME\}/$CLUSTER_CURRENT_HOST_NAME/' /usr/local/skywalking-collector/config/collector.config
echo "replace CLUSTER_CURRENT_PORT with $CLUSTER_CURRENT_PORT"
eval sed -i -e 's/\{CLUSTER_CURRENT_PORT\}/$CLUSTER_CURRENT_PORT/' /usr/local/skywalking-collector/config/collector.config
echo "replace CLUSTER_CURRENT_ROLES with $CLUSTER_CURRENT_ROLES"
eval sed -i -e 's/\{CLUSTER_CURRENT_ROLES\}/$CLUSTER_CURRENT_ROLES/' /usr/local/skywalking-collector/config/collector.config
echo "replace CLUSTER_SEED_NODES with $CLUSTER_SEED_NODES"
eval sed -i -e 's/\{CLUSTER_SEED_NODES\}/$CLUSTER_SEED_NODES/' /usr/local/skywalking-collector/config/collector.config
echo "replace ES_CLUSTER_NAME with $ES_CLUSTER_NAME"
eval sed -i -e 's/\{ES_CLUSTER_NAME\}/$ES_CLUSTER_NAME/' /usr/local/skywalking-collector/config/collector.config
echo "replcae ES_CLUSTER_NODES with $ES_CLUSTER_NODES"
eval sed -i -e 's/\{ES_CLUSTER_NODES\}/$ES_CLUSTER_NODES/' /usr/local/skywalking-collector/config/collector.config
echo "replace ES_INDEX_SHARDS_NUMBER with $ES_INDEX_SHARDS_NUMBER"
eval sed -i -e 's/\{ES_INDEX_SHARDS_NUMBER\}/$ES_INDEX_SHARDS_NUMBER/' /usr/local/skywalking-collector/config/collector.config
echo "replace ES_INDEX_REPLICAS_NUMBER with $ES_INDEX_REPLICAS_NUMBER"
eval sed -i -e 's/\{ES_INDEX_REPLICAS_NUMBER\}/$ES_INDEX_REPLICAS_NUMBER/' /usr/local/skywalking-collector/config/collector.config
echo "replace HTTP_HOST_NAME with $HTTP_HOST_NAME"
eval sed -i -e 's/\{HTTP_HOST_NAME\}/$HTTP_HOST_NAME/' /usr/local/skywalking-collector/config/collector.config
echo "replace HTTP_PORT with $HTTP_PORT"
eval sed -i -e 's/\{HTTP_PORT\}/$HTTP_PORT/' /usr/local/skywalking-collector/config/collector.config
echo "replace HTTP_CONTEXT_PATH with $HTTP_CONTEXT_PATH"
eval sed -i -e 's|\{HTTP_CONTEXT_PATH\}|$HTTP_CONTEXT_PATH|' /usr/local/skywalking-collector/config/collector.config
exec "$@"
#!/usr/bin/env bash
PRG="$0"
PRGDIR=`dirname "$PRG"`
[ -z "$COLLECTOR_HOME" ] && COLLECTOR_HOME=`cd "$PRGDIR/.." >/dev/null; pwd`
COLLECTOR_LOGS_DIR="${COLLECTOR_HOME}/logs"
JAVA_OPTS=" -Xms256M -Xmx512M"
if [ ! -d "${COLLECTOR_HOME}/logs" ]; then
mkdir -p "${COLLECTOR_LOGS_DIR}"
fi
# Use $JAVA_HOME/bin/java when available, otherwise fall back to java on the PATH.
_RUNJAVA="${JAVA_HOME}/bin/java"
[ -z "$JAVA_HOME" ] && _RUNJAVA=java
CLASSPATH="$COLLECTOR_HOME/config:$CLASSPATH"
for i in "$COLLECTOR_HOME"/libs/*.jar
do
CLASSPATH="$i:$CLASSPATH"
done
echo "Starting collector...."
$_RUNJAVA ${JAVA_OPTS} -classpath $CLASSPATH com.a.eye.skywalking.collector.worker.CollectorBootStartUp
@@ -14,6 +14,9 @@
    <properties>
        <jetty.version>9.4.2.v20170220</jetty.version>
        <main.class>com.a.eye.skywalking.collector.worker.CollectorBootStartUp</main.class>
        <docker.cache.imageName>skywalking-collector</docker.cache.imageName>
        <docker.imageTag.version>${version}</docker.imageTag.version>
    </properties>
    <dependencies>
@@ -54,4 +57,64 @@
            <version>RELEASE</version>
        </dependency>
    </dependencies>
    <build>
        <finalName>skywalking-collector</finalName>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-jar-plugin</artifactId>
                <version>2.3.2</version>
                <configuration>
                    <excludes>
                        <exclude>*.xml</exclude>
                        <exclude>*.config</exclude>
                    </excludes>
                    <archive>
                        <manifest>
                            <mainClass>${main.class}</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <executions>
                    <execution>
                        <id>assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                        <configuration>
                            <descriptors>
                                <descriptor>src/main/assembly/assembly.xml</descriptor>
                            </descriptors>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>com.spotify</groupId>
                <artifactId>docker-maven-plugin</artifactId>
                <version>${docker.plugin.version}</version>
                <configuration>
                    <skipDocker>false</skipDocker>
                    <imageName>${docker.cache.imageName}</imageName>
                    <imageTags>
                        <imageTag>${docker.imageTag.version}</imageTag>
                    </imageTags>
                    <dockerDirectory>${project.basedir}/docker</dockerDirectory>
                    <resources>
                        <resource>
                            <targetPath>/</targetPath>
                            <directory>${project.build.directory}</directory>
                            <include>${build.finalName}.tar.gz</include>
                        </resource>
                    </resources>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
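With <skipDocker> flipped to false here (it stays true in the parent build shown earlier), the image is produced by invoking the Spotify plugin explicitly; a typical invocation, assuming docker.plugin.version is defined by the parent and a Docker daemon is reachable:

# Package the collector, assemble skywalking-collector.tar.gz, then build the
# skywalking-collector image from the module's docker/ directory.
mvn clean package docker:build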
<assembly
    xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
    <id></id>
    <formats>
        <format>tar.gz</format>
        <format>zip</format>
    </formats>
    <dependencySets>
        <dependencySet>
            <outputDirectory>/libs</outputDirectory>
            <scope>runtime</scope>
        </dependencySet>
    </dependencySets>
    <fileSets>
        <fileSet>
            <directory>${project.basedir}/bin</directory>
            <outputDirectory>/bin</outputDirectory>
            <includes>
                <include>*.sh</include>
                <include>*.bat</include>
            </includes>
            <fileMode>0755</fileMode>
        </fileSet>
        <fileSet>
            <directory>src/main/resources</directory>
            <includes>
                <include>collector.config</include>
                <include>log4j2.xml</include>
            </includes>
            <outputDirectory>/config</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${project.build.directory}</directory>
            <outputDirectory>/libs</outputDirectory>
            <includes>
                <include>${build.finalName}.jar</include>
            </includes>
        </fileSet>
    </fileSets>
</assembly>
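Given this descriptor and <finalName>skywalking-collector</finalName>, the packaged archive should roughly look as follows (a sketch; the path and exact contents depend on the module's dependencies):

# List the archive produced by the assembly plugin during mvn package.
tar -tzf target/skywalking-collector.tar.gz | head
# Expected top-level layout:
#   skywalking-collector/bin/     start scripts (*.sh, *.bat, mode 0755)
#   skywalking-collector/config/  collector.config, log4j2.xml
#   skywalking-collector/libs/    runtime dependencies plus skywalking-collector.jar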
# The host name or IP address that remote members and agents use to connect to this node.
# Suggestion: set the real IP address.
cluster.current.hostname=127.0.0.1
cluster.current.port=1000

# The roles of this member. List of strings, e.g. roles = A, B
# In the future, the roles are part of the membership information and can be used by
# routers or other services to distribute work to certain member types,
# e.g. front-end and back-end nodes.
# In this version all members have the same roles. Each member watches the status of the others;
# if a member becomes unreachable (network trouble, JVM crash, or any other reason),
# the routers stop sending messages to that member.
cluster.current.roles=WorkersListener

# Initial contact points of the cluster, e.g. seed_nodes = 127.0.0.1:1000, 127.0.0.1:1001.
# The nodes to join automatically at startup.
# When applied to the akka configuration, this is translated into entries
# like: ["akka.tcp://system@127.0.0.1:1000", "akka.tcp://system@127.0.0.1:1001"].
# This is akka configuration, see: http://doc.akka.io/docs/akka/2.4/general/configuration.html
cluster.seed_nodes=127.0.0.1:1000,127.0.0.1:1001

# elasticsearch configuration, config/elasticsearch.yml, see cluster.name
es.cluster.name=CollectorDBCluster
# The elasticsearch nodes of the cluster, comma separated, e.g. nodes=ip:port, ip:port
es.cluster.nodes=127.0.0.1:9300
es.cluster.transport.sniffer=true
# Automatic creation of the elasticsearch index.
# Options: overwrite, ignore, off
# overwrite: delete the existing index, then create it again
# ignore: only create the index when it does not exist yet
es.index.create=ignore
es.index.shards.number=2
es.index.replicas.number=0

# You can configure a host either as a host name or IP address to identify a specific network
# interface on which to listen.
# Used by the web UI to fetch view data and by agents to post trace segments.
http.hostname=127.0.0.1
# The TCP/IP port on which the connector listens for connections.
http.port=7001
# The contextPath is a URL prefix that identifies which context an HTTP request is destined for.
http.contextPath=/

# The analysis worker max cache size; when the worker's data size reaches this limit,
# the worker sends all cached data to the next worker and clears the cache.
cache.analysis.size=1024
# The persistence worker max cache size, same behaviour as "cache.analysis.size".
cache.persistence.size=1024
WorkerNum.Node.NodeCompAgg.Value=10
......
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="info">
    <Properties>
        <Property name="log-path">../logs</Property>
    </Properties>
    <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
            <PatternLayout charset="UTF-8" pattern="[%d{yyyy-MM-dd HH:mm:ss:SSS}] [%p] - %c{1} - %m%n"/>
        </Console>
        <RollingFile name="RollingFile" fileName="${log-path}/skywalking-server-log4j2.log"
                     filePattern="${log-path}/skywalking-server-log4j2-%d{yyyy-MM-dd}-%i.log">
            <PatternLayout>
                <pattern>%d - %c -%-4r [%t] %-5p %x - %m%n</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="102400KB"/>
            </Policies>
            <DefaultRolloverStrategy max="30"/>
        </RollingFile>
    </Appenders>
    <Loggers>
        <Logger name="org.elasticsearch" level="info" additivity="false">
            <AppenderRef ref="Console"/>
        </Logger>
        <Logger name="com.a.eye.skywalking.collector.worker" level="debug" additivity="false">
            <AppenderRef ref="Console"/>
        </Logger>
        <Root level="info">
            <AppenderRef ref="Console"/>
            <AppenderRef ref="RollingFile"/>
        </Root>
    </Loggers>
</Configuration>