#目的
Lambda+ElasticsearchでELBのログをサーバーレスでKibanaに可視化することを実施していました。
#問題
ELBのアクセスログをS3に出力してLambdaをキックするところまではできていると思いますが、Lambdaが正常に処理できていません。
AWSやIT分野に踏み入って間もないため、手順に沿った操作のみでデバッグなどを行えません。
わかりにくい質問だと思いますが、お力添えをお願いします。
LambdaのテストはELBのアクセスログから作りました。
実行結果:失敗ログ
{ "errorMessage": "RequestId: 427d0306-8283-48ba-bda1-db5abf626c69 Process exited before completing request" }
ログ出力
START RequestId: 427d0306-8283-48ba-bda1-db5abf626c69 Version: $LATEST 2019-04-06T15:31:08.689Z 427d0306-8283-48ba-bda1-db5abf626c69 Received event: { "Records": [ { "eventVersion": "2.1", "eventSource": "aws:s3", "awsRegion": "us-east-1", "eventTime": "2019-04-06T14:40:05.414Z", "eventName": "ObjectCreated:Put", "userIdentity": { "principalId": "AWS:AIDAITAATZTOMD5FZTPKC" }, "requestParameters": { "sourceIPAddress": "xx.xxx.xxx.100" }, "responseElements": { "x-amz-request-id": "xxxxxxxxxxx", "x-amz-id-2": "YqgtECAZDlrnNB2BigpTW/WDeJD2Htn5X6A6gedxqWegsZPjeo6zRocbzGbm4Kd8hAZ6bjU/BPc=" }, "s3": { "s3SchemaVersion": "1.0", "configurationId": "610b6044-1be4-4edf-bed8-5b34c9ebaad7", "bucket": { "name": "xxxxxxxxxxxxxx", "ownerIdentity": { "principalId": "A14QZLQ6ZIQ2IT" }, "arn": "arn:aws:s3:::xxxxxxxxxxx" }, "object": { "key": "AWSLogs/xxxxxxxxxxxx/elasticloadbalancing/us-east-1/2019/04/06/xxxxxxxxxxxxxxxx_elasticloadbalancing_us-east-1_app.xxxxxxxx.286fc54579578608_20190406T1440Z_52.206.56.136_2bx2g245.log.gz", "size": 361, "eTag": "cb4a738a8828fdecba285c4d2e94a2c6", "sequencer": "005CA8BA455D16FB39" } } } ] } 2019-04-06T15:31:09.226Z 427d0306-8283-48ba-bda1-db5abf626c69 TypeError: Cannot read property 'split' of undefined at module.exports (/var/task/node_modules/elb-log-parser/index.js:70:45) at Transform.recordStream._transform (/var/task/index.js:126:25) at Transform._read (_stream_transform.js:186:10) at Transform._write (_stream_transform.js:174:12) at doWrite (_stream_writable.js:397:12) at writeOrBuffer (_stream_writable.js:383:5) at Transform.Writable.write (_stream_writable.js:290:11) at LineStream.ondata (_stream_readable.js:639:20) at emitOne (events.js:116:13) at LineStream.emit (events.js:211:7) END RequestId: 427d0306-8283-48ba-bda1-db5abf626c69 REPORT RequestId: 427d0306-8283-48ba-bda1-db5abf626c69 Duration: 582.01 ms Billed Duration: 600 ms Memory Size: 256 MB Max Memory Used: 68 MB RequestId: 427d0306-8283-48ba-bda1-db5abf626c69 Process exited before 
completing request
テストイベント設定
{ "Records": [ { "eventVersion": "2.1", "eventSource": "aws:s3", "awsRegion": "us-east-1", "eventTime": "2019-04-06T14:40:05.414Z", "eventName": "ObjectCreated:Put", "userIdentity": { "principalId": "AWS:AIDAITAATZTOMD5FZTPKC" }, "requestParameters": { "sourceIPAddress": "xx.xxx.xxx.100" }, "responseElements": { "x-amz-request-id": "xxxxxxxxxxxxxxx", "x-amz-id-2": "YqgtECAZDlrnNB2BigpTW/WDeJD2Htn5X6A6gedxqWegsZPjeo6zRocbzGbm4Kd8hAZ6bjU/BPc=" }, "s3": { "s3SchemaVersion": "1.0", "configurationId": "610b6044-1be4-4edf-bed8-5b34c9ebaad7", "bucket": { "name": "xxxxxxxxxxxxxxxx", "ownerIdentity": { "principalId": "A14QZLQ6ZIQ2IT" }, "arn": "arn:aws:s3:::xxxxxxxxxxxxxxxxx" }, "object": { "key": "AWSLogs/xxxxxxxxxxxxxxx/elasticloadbalancing/us-east-1/2019/04/06/9xxxxxxxxxxxxxx_elasticloadbalancing_us-east-1_app.xxxxxxxxxxxxxx.286fc54579578608_20190406T1440Z_52.206.56.136_2bx2g245.log.gz", "size": 361, "eTag": "cb4a738a8828fdecba285c4d2e94a2c6", "sequencer": "005CA8BA455D16FB39" } } } ] }
Lambda関数
/* * Sample node.js code for AWS Lambda to get Apache log files from S3, parse * and add them to an Amazon Elasticsearch Service domain. * * * Copyright 2015- Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at http://aws.amazon.com/asl/ * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* Imports */ var AWS = require('aws-sdk'); var LineStream = require('byline').LineStream; var parse = require('elb-log-parser'); // Apache Common Log Format var path = require('path'); var stream = require('stream'); /* Globals */ var esDomain = { endpoint: 'https://search-test20190406-qludo2zvsek2bhjmfirgynljyi.us-east-1.es.amazonaws.com', region: 'us-east-1', index: 'logs', doctype: 'elb' }; var endpoint = new AWS.Endpoint(esDomain.endpoint); var s3 = new AWS.S3(); var totLogLines = 0; // Total number of log lines in the file var numDocsAdded = 0; // Number of log lines added to ES so far /* * The AWS credentials are picked up from the environment. * They belong to the IAM role assigned to the Lambda function. * Since the ES requests are signed using these credentials, * make sure to apply a policy that permits ES domain operations * to the role. */ var creds = new AWS.EnvironmentCredentials('AWS'); /* * Get the log file from the given S3 bucket and key. Parse it and add * each log record to the ES domain. */ function s3LogsToES(bucket, key, context, lineStream, recordStream) { // Note: The Lambda function should be configured to filter for .log files // (as part of the Event Source "suffix" setting). 
var s3Stream = s3.getObject({Bucket: bucket, Key: key}).createReadStream(); // Flow: S3 file stream -> Log Line stream -> Log Record stream -> ES s3Stream .pipe(lineStream) .pipe(recordStream) .on('data', function(parsedEntry) { postDocumentToES(parsedEntry, context); }); s3Stream.on('error', function() { console.log( 'Error getting object "' + key + '" from bucket "' + bucket + '". ' + 'Make sure they exist and your bucket is in the same region as this function.'); context.fail(); }); } /* * Add the given document to the ES domain. * If all records are successfully added, indicate success to lambda * (using the "context" parameter). */ function postDocumentToES(doc, context) { var req = new AWS.HttpRequest(endpoint); req.method = 'POST'; req.path = path.join('/', esDomain.index, esDomain.doctype); req.region = esDomain.region; req.body = doc; req.headers['presigned-expires'] = false; req.headers['Host'] = endpoint.host; // Sign the request (Sigv4) var signer = new AWS.Signers.V4(req, 'es'); signer.addAuthorization(creds, new Date()); // Post document to ES var send = new AWS.NodeHttpClient(); send.handleRequest(req, null, function(httpResp) { var body = ''; httpResp.on('data', function (chunk) { body += chunk; }); httpResp.on('end', function (chunk) { numDocsAdded ++; if (numDocsAdded === totLogLines) { // Mark lambda success. If not done so, it will be retried. console.log('All ' + numDocsAdded + ' log records added to ES.'); context.succeed(); } }); }, function(err) { console.log('Error: ' + err); console.log(numDocsAdded + 'of ' + totLogLines + ' log records added to ES.'); context.fail(); }); } /* Lambda "main": Execution starts here */ exports.handler = function(event, context) { console.log('Received event: ', JSON.stringify(event, null, 2)); /* == Streams == * To avoid loading an entire (typically large) log file into memory, * this is implemented as a pipeline of filters, streaming log data * from S3 to ES. 
* Flow: S3 file stream -> Log Line stream -> Log Record stream -> ES */ var lineStream = new LineStream(); // A stream of log records, from parsing each log line var recordStream = new stream.Transform({objectMode: true}) recordStream._transform = function(line, encoding, done) { var logRecord = parse(line.toString()); var serializedRecord = JSON.stringify(logRecord); this.push(serializedRecord); totLogLines ++; done(); } event.Records.forEach(function(record) { var bucket = record.s3.bucket.name; var objKey = decodeURIComponent(record.s3.object.key.replace(/+/g, ' ')); s3LogsToES(bucket, objKey, context, lineStream, recordStream); }); }
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。
2019/04/09 06:04
2019/04/09 06:07