001    // Generated by the protocol buffer compiler.  DO NOT EDIT!
002    // source: QJournalProtocol.proto
003    
004    package org.apache.hadoop.hdfs.qjournal.protocol;
005    
006    public final class QJournalProtocolProtos {
007      private QJournalProtocolProtos() {}
008      public static void registerAllExtensions(
009          com.google.protobuf.ExtensionRegistry registry) {
010      }
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.JournalIdProto},
   * implemented by both the immutable message and its Builder.
   * Generated by protoc — do not hand-edit.
   */
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string identifier = 1;
    /**
     * <code>required string identifier = 1;</code>
     *
     * @return whether the required identifier field has been set
     */
    boolean hasIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     *
     * @return the identifier as a Java String (UTF-8 decoded)
     */
    java.lang.String getIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     *
     * @return the identifier as raw UTF-8 bytes
     */
    com.google.protobuf.ByteString
        getIdentifierBytes();
  }
029      /**
030       * Protobuf type {@code hadoop.hdfs.JournalIdProto}
031       */
  public static final class JournalIdProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalIdProtoOrBuilder {
    // Use JournalIdProto.newBuilder() to construct.
    private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only to create the singleton default instance
    // in the static initializer at the bottom of this class.
    private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton "empty" instance shared by all callers; assigned in the
    // static initializer below.
    private static final JournalIdProto defaultInstance;
    public static JournalIdProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalIdProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire with tags this class does not know,
    // preserved so they survive a parse/serialize round trip.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor; invoked only via PARSER.parsePartialFrom().
    private JournalIdProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // tag 0 signals end of input / end of message
              done = true;
              break;
            default: {
              // Unrecognized tag: stash it in unknownFields; a false return
              // means an end-group tag was hit, which also terminates parsing.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // tag 10 = (field 1 << 3) | wire type 2 (length-delimited): identifier
              bitField0_ |= 0x00000001;
              // Stored as a raw ByteString; UTF-8 decoding is deferred to
              // the first getIdentifier() call.
              identifier_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed, even on error, so the
        // unfinished message attached to the exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
    }

    // Stateless parser that delegates to the wire-format constructor above.
    public static com.google.protobuf.Parser<JournalIdProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalIdProto>() {
      public JournalIdProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalIdProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
      return PARSER;
    }

    // Bit i of bitField0_ records whether field i+1 has been explicitly set.
    private int bitField0_;
    // required string identifier = 1;
    public static final int IDENTIFIER_FIELD_NUMBER = 1;
    // Holds either a java.lang.String or a ByteString; converted lazily and
    // cached by the two accessors below.
    private java.lang.Object identifier_;
    /**
     * <code>required string identifier = 1;</code>
     */
    public boolean hasIdentifier() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required string identifier = 1;</code>
     *
     * Lazily decodes the stored ByteString to a String. The decoded value is
     * cached back into identifier_ only when the bytes are valid UTF-8, so an
     * invalid payload keeps round-tripping the original raw bytes.
     */
    public java.lang.String getIdentifier() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          identifier_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string identifier = 1;</code>
     *
     * Inverse of getIdentifier(): encodes a cached String to UTF-8 bytes and
     * caches the ByteString for subsequent serialization.
     */
    public com.google.protobuf.ByteString
        getIdentifierBytes() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        identifier_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    // Resets all fields to their proto defaults; called before parsing and
    // when building the default instance.
    private void initFields() {
      identifier_ = "";
    }
    // Memoized isInitialized result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // identifier is a required field; the message is invalid without it.
      if (!hasIdentifier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces the serialized size to be memoized before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getIdentifierBytes());
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized wire size: -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getIdentifierBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to GeneratedMessage's writeReplace,
    // which serializes via the protobuf wire format.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;

      // Two messages are equal iff each field has the same presence bit and,
      // when present, the same value — plus identical unknown fields.
      boolean result = true;
      result = result && (hasIdentifier() == other.hasIdentifier());
      if (hasIdentifier()) {
        result = result && getIdentifier()
            .equals(other.getIdentifier());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 means "not yet computed" (a legitimately-zero hash is
    // simply recomputed each call).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIdentifier()) {
        hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
        hash = (53 * hash) + getIdentifier().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parseFrom overloads — thin wrappers over PARSER for every common
    // input source (ByteString, byte[], streams, CodedInputStream), with and
    // without an extension registry.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Creates a builder pre-populated with a copy of the given message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.JournalIdProto}
     *
     * Mutable builder for JournalIdProto. Not thread-safe; build() validates
     * required fields while buildPartial() does not.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No nested-message fields here, so there are no field builders to
      // eagerly initialize; the hook is kept for generator uniformity.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets the builder to proto defaults and clears presence bits.
      public Builder clear() {
        super.clear();
        identifier_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      }

      // Builds and validates; throws if the required identifier is unset.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without validating required fields; copies presence bits from
      // the builder's bitField0_ into the message's.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.identifier_ = identifier_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
        } else {
          // Fall back to reflective field-by-field merge for foreign types.
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-wise merge: set fields in 'other' overwrite this builder's.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
        if (other.hasIdentifier()) {
          bitField0_ |= 0x00000001;
          // Shares other's String-or-ByteString value directly; safe because
          // both representations are immutable.
          identifier_ = other.identifier_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasIdentifier()) {
          
          return false;
        }
        return true;
      }

      // Parses from a stream and merges the result; on a parse error the
      // partially-parsed message (if any) is still merged before rethrowing.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required string identifier = 1;
      private java.lang.Object identifier_ = "";
      /**
       * <code>required string identifier = 1;</code>
       */
      public boolean hasIdentifier() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required string identifier = 1;</code>
       *
       * Unlike the message's accessor, the builder always caches the decoded
       * String back into identifier_ (no UTF-8 validity check).
       */
      public java.lang.String getIdentifier() {
        java.lang.Object ref = identifier_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          identifier_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public com.google.protobuf.ByteString
          getIdentifierBytes() {
        java.lang.Object ref = identifier_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b = 
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          identifier_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       *
       * @throws NullPointerException if value is null (protobuf fields are
       *         never null; use clearIdentifier() to unset)
       */
      public Builder setIdentifier(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder clearIdentifier() {
        bitField0_ = (bitField0_ & ~0x00000001);
        identifier_ = getDefaultInstance().getIdentifier();
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       *
       * Sets the raw bytes directly; no UTF-8 validation is performed here.
       */
      public Builder setIdentifierBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalIdProto)
    }

    // Class initializer: creates the shared default instance via the noInit
    // constructor, then sets its fields to proto defaults.
    static {
      defaultInstance = new JournalIdProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalIdProto)
  }
527    
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.RequestInfoProto},
   * implemented by both the immutable message and its Builder.
   * Generated by protoc — do not hand-edit.
   */
  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    boolean hasJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();

    // required uint64 epoch = 2;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    long getEpoch();

    // required uint64 ipcSerialNumber = 3;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    boolean hasIpcSerialNumber();
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    long getIpcSerialNumber();

    // optional uint64 committedTxId = 4;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    boolean hasCommittedTxId();
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    long getCommittedTxId();
  }
591      /**
592       * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
593       */
594      public static final class RequestInfoProto extends
595          com.google.protobuf.GeneratedMessage
596          implements RequestInfoProtoOrBuilder {
    // Use RequestInfoProto.newBuilder() to construct.
    private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only to create the singleton default instance
    // (assigned in the class's static initializer, outside this view).
    private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton "empty" instance shared by all callers.
    private static final RequestInfoProto defaultInstance;
    public static RequestInfoProto getDefaultInstance() {
      return defaultInstance;
    }

    public RequestInfoProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire with unrecognized tags, preserved so
    // they survive a parse/serialize round trip.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor; invoked only via PARSER.parsePartialFrom().
    private RequestInfoProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // tag 0 signals end of input / end of message
              done = true;
              break;
            default: {
              // Unrecognized tag: stash it in unknownFields; a false return
              // means an end-group tag was hit, which also terminates parsing.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // tag 10 = (field 1 << 3) | wire type 2: journalId (nested message)
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // Field already seen on the wire: per proto semantics, a
                // repeated occurrence of a singular message field merges
                // into the previous value rather than replacing it.
                subBuilder = journalId_.toBuilder();
              }
              journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(journalId_);
                journalId_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // tag 16 = (field 2 << 3) | wire type 0 (varint): epoch
              bitField0_ |= 0x00000002;
              epoch_ = input.readUInt64();
              break;
            }
            case 24: {
              // tag 24 = (field 3 << 3) | wire type 0: ipcSerialNumber
              bitField0_ |= 0x00000004;
              ipcSerialNumber_ = input.readUInt64();
              break;
            }
            case 32: {
              // tag 32 = (field 4 << 3) | wire type 0: committedTxId
              bitField0_ |= 0x00000008;
              committedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed, even on error, so the
        // unfinished message attached to the exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
    }

    // Stateless parser that delegates to the wire-format constructor above.
    public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
        new com.google.protobuf.AbstractParser<RequestInfoProto>() {
      public RequestInfoProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RequestInfoProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
      return PARSER;
    }
708    
    // Bitmask of explicitly-set fields: bit 0 = journalId, bit 1 = epoch,
    // bit 2 = ipcSerialNumber, bit 3 = committedTxId.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    public static final int JOURNALID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public boolean hasJournalId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
      return journalId_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     *
     * <p>The message class itself implements the OrBuilder view, so this
     * returns the same object as {@link #getJournalId()}.</p>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
      return journalId_;
    }
731    
    // required uint64 epoch = 2;
    public static final int EPOCH_FIELD_NUMBER = 2;
    // Proto uint64 carried as a Java long (written via writeUInt64).
    private long epoch_;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public long getEpoch() {
      return epoch_;
    }
747    
    // required uint64 ipcSerialNumber = 3;
    public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
    // Proto uint64 carried as a Java long (written via writeUInt64).
    private long ipcSerialNumber_;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public boolean hasIpcSerialNumber() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public long getIpcSerialNumber() {
      return ipcSerialNumber_;
    }
763    
    // optional uint64 committedTxId = 4;
    public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
    // Optional field: meaningful only when hasCommittedTxId() is true;
    // otherwise holds the proto default 0L set by initFields().
    private long committedTxId_;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    public boolean hasCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    public long getCommittedTxId() {
      return committedTxId_;
    }
795    
    // Resets every field to its proto default value; called on the
    // singleton default instance by the static initializer.
    private void initFields() {
      journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      epoch_ = 0L;
      ipcSerialNumber_ = 0L;
      committedTxId_ = 0L;
    }
    // Memoized result of isInitialized(): -1 = not yet computed,
    // 0 = known uninitialized, 1 = known initialized.
    private byte memoizedIsInitialized = -1;
    /**
     * Returns true when every required field (journalId, epoch,
     * ipcSerialNumber) is present and the nested journalId message is itself
     * initialized. The result is cached in {@code memoizedIsInitialized}.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJournalId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIpcSerialNumber()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJournalId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
826    
    /**
     * Serializes the set fields to {@code output} in field-number order
     * (1..4), followed by any unknown fields. Unset fields are skipped.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces the serialized size to be computed/memoized before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, committedTxId_);
      }
      getUnknownFields().writeTo(output);
    }
844    
    // Cached wire size in bytes; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    /**
     * Computes (once) and returns the serialized size in bytes: the sum of
     * each set field's encoded size plus the unknown-field set.
     */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(4, committedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
871    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      // Delegates Java-serialization replacement to GeneratedMessage.
      // NOTE(review): presumably substitutes a serialization proxy for the
      // message — confirm against the protobuf runtime if this matters.
      return super.writeReplace();
    }
878    
    /**
     * Field-by-field equality: two messages are equal when they agree on
     * which fields are set, on each set field's value, and on their unknown
     * fields. Non-RequestInfoProto arguments fall back to the superclass.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;

      boolean result = true;
      result = result && (hasJournalId() == other.hasJournalId());
      if (hasJournalId()) {
        result = result && getJournalId()
            .equals(other.getJournalId());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
      if (hasIpcSerialNumber()) {
        result = result && (getIpcSerialNumber()
            == other.getIpcSerialNumber());
      }
      result = result && (hasCommittedTxId() == other.hasCommittedTxId());
      if (hasCommittedTxId()) {
        result = result && (getCommittedTxId()
            == other.getCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
914    
    // Cached hash; 0 doubles as the "not yet computed" sentinel, so a
    // message whose hash is genuinely 0 is simply recomputed each call.
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with {@link #equals}: folds in the descriptor, each
     * set field (tagged by its field number), and the unknown fields.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJournalId()) {
        hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
        hash = (53 * hash) + getJournalId().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      if (hasIpcSerialNumber()) {
        hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getIpcSerialNumber());
      }
      if (hasCommittedTxId()) {
        hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
943    
    // Static parse entry points for the supported input forms (ByteString,
    // byte[], InputStream, CodedInputStream; with or without an extension
    // registry). All delegate the actual wire-format decoding to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // parseDelimitedFrom expects the message to be preceded by its varint
    // length, as written by writeDelimitedTo.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
996    
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Returns a new builder pre-populated from {@code prototype}. */
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** Returns a builder pre-populated with this message's fields. */
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
1010        /**
1011         * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
1012         */
1013        public static final class Builder extends
1014            com.google.protobuf.GeneratedMessage.Builder<Builder>
1015           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
1016          public static final com.google.protobuf.Descriptors.Descriptor
1017              getDescriptor() {
1018            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1019          }
1020    
1021          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1022              internalGetFieldAccessorTable() {
1023            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
1024                .ensureFieldAccessorsInitialized(
1025                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
1026          }
1027    
1028          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
1029          private Builder() {
1030            maybeForceBuilderInitialization();
1031          }
1032    
1033          private Builder(
1034              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1035            super(parent);
1036            maybeForceBuilderInitialization();
1037          }
1038          private void maybeForceBuilderInitialization() {
1039            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1040              getJournalIdFieldBuilder();
1041            }
1042          }
1043          private static Builder create() {
1044            return new Builder();
1045          }
1046    
1047          public Builder clear() {
1048            super.clear();
1049            if (journalIdBuilder_ == null) {
1050              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1051            } else {
1052              journalIdBuilder_.clear();
1053            }
1054            bitField0_ = (bitField0_ & ~0x00000001);
1055            epoch_ = 0L;
1056            bitField0_ = (bitField0_ & ~0x00000002);
1057            ipcSerialNumber_ = 0L;
1058            bitField0_ = (bitField0_ & ~0x00000004);
1059            committedTxId_ = 0L;
1060            bitField0_ = (bitField0_ & ~0x00000008);
1061            return this;
1062          }
1063    
1064          public Builder clone() {
1065            return create().mergeFrom(buildPartial());
1066          }
1067    
1068          public com.google.protobuf.Descriptors.Descriptor
1069              getDescriptorForType() {
1070            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1071          }
1072    
1073          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
1074            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
1075          }
1076    
1077          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
1078            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
1079            if (!result.isInitialized()) {
1080              throw newUninitializedMessageException(result);
1081            }
1082            return result;
1083          }
1084    
1085          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
1086            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
1087            int from_bitField0_ = bitField0_;
1088            int to_bitField0_ = 0;
1089            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1090              to_bitField0_ |= 0x00000001;
1091            }
1092            if (journalIdBuilder_ == null) {
1093              result.journalId_ = journalId_;
1094            } else {
1095              result.journalId_ = journalIdBuilder_.build();
1096            }
1097            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1098              to_bitField0_ |= 0x00000002;
1099            }
1100            result.epoch_ = epoch_;
1101            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1102              to_bitField0_ |= 0x00000004;
1103            }
1104            result.ipcSerialNumber_ = ipcSerialNumber_;
1105            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1106              to_bitField0_ |= 0x00000008;
1107            }
1108            result.committedTxId_ = committedTxId_;
1109            result.bitField0_ = to_bitField0_;
1110            onBuilt();
1111            return result;
1112          }
1113    
1114          public Builder mergeFrom(com.google.protobuf.Message other) {
1115            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
1116              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
1117            } else {
1118              super.mergeFrom(other);
1119              return this;
1120            }
1121          }
1122    
1123          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
1124            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
1125            if (other.hasJournalId()) {
1126              mergeJournalId(other.getJournalId());
1127            }
1128            if (other.hasEpoch()) {
1129              setEpoch(other.getEpoch());
1130            }
1131            if (other.hasIpcSerialNumber()) {
1132              setIpcSerialNumber(other.getIpcSerialNumber());
1133            }
1134            if (other.hasCommittedTxId()) {
1135              setCommittedTxId(other.getCommittedTxId());
1136            }
1137            this.mergeUnknownFields(other.getUnknownFields());
1138            return this;
1139          }
1140    
1141          public final boolean isInitialized() {
1142            if (!hasJournalId()) {
1143              
1144              return false;
1145            }
1146            if (!hasEpoch()) {
1147              
1148              return false;
1149            }
1150            if (!hasIpcSerialNumber()) {
1151              
1152              return false;
1153            }
1154            if (!getJournalId().isInitialized()) {
1155              
1156              return false;
1157            }
1158            return true;
1159          }
1160    
1161          public Builder mergeFrom(
1162              com.google.protobuf.CodedInputStream input,
1163              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1164              throws java.io.IOException {
1165            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
1166            try {
1167              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1168            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1169              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
1170              throw e;
1171            } finally {
1172              if (parsedMessage != null) {
1173                mergeFrom(parsedMessage);
1174              }
1175            }
1176            return this;
1177          }
1178          private int bitField0_;
1179    
1180          // required .hadoop.hdfs.JournalIdProto journalId = 1;
1181          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1182          private com.google.protobuf.SingleFieldBuilder<
1183              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
1184          /**
1185           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1186           */
1187          public boolean hasJournalId() {
1188            return ((bitField0_ & 0x00000001) == 0x00000001);
1189          }
1190          /**
1191           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1192           */
1193          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
1194            if (journalIdBuilder_ == null) {
1195              return journalId_;
1196            } else {
1197              return journalIdBuilder_.getMessage();
1198            }
1199          }
1200          /**
1201           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1202           */
1203          public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1204            if (journalIdBuilder_ == null) {
1205              if (value == null) {
1206                throw new NullPointerException();
1207              }
1208              journalId_ = value;
1209              onChanged();
1210            } else {
1211              journalIdBuilder_.setMessage(value);
1212            }
1213            bitField0_ |= 0x00000001;
1214            return this;
1215          }
1216          /**
1217           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1218           */
1219          public Builder setJournalId(
1220              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
1221            if (journalIdBuilder_ == null) {
1222              journalId_ = builderForValue.build();
1223              onChanged();
1224            } else {
1225              journalIdBuilder_.setMessage(builderForValue.build());
1226            }
1227            bitField0_ |= 0x00000001;
1228            return this;
1229          }
1230          /**
1231           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1232           */
1233          public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1234            if (journalIdBuilder_ == null) {
1235              if (((bitField0_ & 0x00000001) == 0x00000001) &&
1236                  journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
1237                journalId_ =
1238                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
1239              } else {
1240                journalId_ = value;
1241              }
1242              onChanged();
1243            } else {
1244              journalIdBuilder_.mergeFrom(value);
1245            }
1246            bitField0_ |= 0x00000001;
1247            return this;
1248          }
1249          /**
1250           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1251           */
1252          public Builder clearJournalId() {
1253            if (journalIdBuilder_ == null) {
1254              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1255              onChanged();
1256            } else {
1257              journalIdBuilder_.clear();
1258            }
1259            bitField0_ = (bitField0_ & ~0x00000001);
1260            return this;
1261          }
1262          /**
1263           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1264           */
1265          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1266            bitField0_ |= 0x00000001;
1267            onChanged();
1268            return getJournalIdFieldBuilder().getBuilder();
1269          }
1270          /**
1271           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1272           */
1273          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1274            if (journalIdBuilder_ != null) {
1275              return journalIdBuilder_.getMessageOrBuilder();
1276            } else {
1277              return journalId_;
1278            }
1279          }
1280          /**
1281           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1282           */
1283          private com.google.protobuf.SingleFieldBuilder<
1284              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
1285              getJournalIdFieldBuilder() {
1286            if (journalIdBuilder_ == null) {
1287              journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1288                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1289                      journalId_,
1290                      getParentForChildren(),
1291                      isClean());
1292              journalId_ = null;
1293            }
1294            return journalIdBuilder_;
1295          }
1296    
1297          // required uint64 epoch = 2;
1298          private long epoch_ ;
1299          /**
1300           * <code>required uint64 epoch = 2;</code>
1301           */
1302          public boolean hasEpoch() {
1303            return ((bitField0_ & 0x00000002) == 0x00000002);
1304          }
1305          /**
1306           * <code>required uint64 epoch = 2;</code>
1307           */
1308          public long getEpoch() {
1309            return epoch_;
1310          }
1311          /**
1312           * <code>required uint64 epoch = 2;</code>
1313           */
1314          public Builder setEpoch(long value) {
1315            bitField0_ |= 0x00000002;
1316            epoch_ = value;
1317            onChanged();
1318            return this;
1319          }
1320          /**
1321           * <code>required uint64 epoch = 2;</code>
1322           */
1323          public Builder clearEpoch() {
1324            bitField0_ = (bitField0_ & ~0x00000002);
1325            epoch_ = 0L;
1326            onChanged();
1327            return this;
1328          }
1329    
1330          // required uint64 ipcSerialNumber = 3;
1331          private long ipcSerialNumber_ ;
1332          /**
1333           * <code>required uint64 ipcSerialNumber = 3;</code>
1334           */
1335          public boolean hasIpcSerialNumber() {
1336            return ((bitField0_ & 0x00000004) == 0x00000004);
1337          }
1338          /**
1339           * <code>required uint64 ipcSerialNumber = 3;</code>
1340           */
1341          public long getIpcSerialNumber() {
1342            return ipcSerialNumber_;
1343          }
1344          /**
1345           * <code>required uint64 ipcSerialNumber = 3;</code>
1346           */
1347          public Builder setIpcSerialNumber(long value) {
1348            bitField0_ |= 0x00000004;
1349            ipcSerialNumber_ = value;
1350            onChanged();
1351            return this;
1352          }
1353          /**
1354           * <code>required uint64 ipcSerialNumber = 3;</code>
1355           */
1356          public Builder clearIpcSerialNumber() {
1357            bitField0_ = (bitField0_ & ~0x00000004);
1358            ipcSerialNumber_ = 0L;
1359            onChanged();
1360            return this;
1361          }
1362    
1363          // optional uint64 committedTxId = 4;
1364          private long committedTxId_ ;
1365          /**
1366           * <code>optional uint64 committedTxId = 4;</code>
1367           *
1368           * <pre>
1369           * Whenever a writer makes a request, it informs
1370           * the node of the latest committed txid. This may
1371           * be higher than the transaction data included in the
1372           * request itself, eg in the case that the node has
1373           * fallen behind.
1374           * </pre>
1375           */
1376          public boolean hasCommittedTxId() {
1377            return ((bitField0_ & 0x00000008) == 0x00000008);
1378          }
1379          /**
1380           * <code>optional uint64 committedTxId = 4;</code>
1381           *
1382           * <pre>
1383           * Whenever a writer makes a request, it informs
1384           * the node of the latest committed txid. This may
1385           * be higher than the transaction data included in the
1386           * request itself, eg in the case that the node has
1387           * fallen behind.
1388           * </pre>
1389           */
1390          public long getCommittedTxId() {
1391            return committedTxId_;
1392          }
1393          /**
1394           * <code>optional uint64 committedTxId = 4;</code>
1395           *
1396           * <pre>
1397           * Whenever a writer makes a request, it informs
1398           * the node of the latest committed txid. This may
1399           * be higher than the transaction data included in the
1400           * request itself, eg in the case that the node has
1401           * fallen behind.
1402           * </pre>
1403           */
1404          public Builder setCommittedTxId(long value) {
1405            bitField0_ |= 0x00000008;
1406            committedTxId_ = value;
1407            onChanged();
1408            return this;
1409          }
1410          /**
1411           * <code>optional uint64 committedTxId = 4;</code>
1412           *
1413           * <pre>
1414           * Whenever a writer makes a request, it informs
1415           * the node of the latest committed txid. This may
1416           * be higher than the transaction data included in the
1417           * request itself, eg in the case that the node has
1418           * fallen behind.
1419           * </pre>
1420           */
1421          public Builder clearCommittedTxId() {
1422            bitField0_ = (bitField0_ & ~0x00000008);
1423            committedTxId_ = 0L;
1424            onChanged();
1425            return this;
1426          }
1427    
1428          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RequestInfoProto)
1429        }
1430    
    // Eagerly creates the singleton returned by getDefaultInstance();
    // initFields() sets every field to its proto default.
    static {
      defaultInstance = new RequestInfoProto(true);
      defaultInstance.initFields();
    }
1435    
1436        // @@protoc_insertion_point(class_scope:hadoop.hdfs.RequestInfoProto)
1437      }
1438    
  /**
   * Accessor interface implemented by both {@code SegmentStateProto} and its
   * Builder: presence checks and getters for a segment's start/end transaction
   * ids and its in-progress flag. All three fields are required.
   */
  public interface SegmentStateProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 startTxId = 1;
    /**
     * <code>required uint64 startTxId = 1;</code>
     */
    boolean hasStartTxId();
    /**
     * <code>required uint64 startTxId = 1;</code>
     */
    long getStartTxId();

    // required uint64 endTxId = 2;
    /**
     * <code>required uint64 endTxId = 2;</code>
     */
    boolean hasEndTxId();
    /**
     * <code>required uint64 endTxId = 2;</code>
     */
    long getEndTxId();

    // required bool isInProgress = 3;
    /**
     * <code>required bool isInProgress = 3;</code>
     */
    boolean hasIsInProgress();
    /**
     * <code>required bool isInProgress = 3;</code>
     */
    boolean getIsInProgress();
  }
1472      /**
1473       * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1474       */
  public static final class SegmentStateProto extends
      com.google.protobuf.GeneratedMessage
      implements SegmentStateProtoOrBuilder {
    // Use SegmentStateProto.newBuilder() to construct.
    private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the singleton default instance below.
    private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance; created and initialized in the static block
    // at the bottom of this class.
    private static final SegmentStateProto defaultInstance;
    public static SegmentStateProto getDefaultInstance() {
      return defaultInstance;
    }

    public SegmentStateProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields seen on the wire that are not part of this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: consumes the stream until end-of-message, setting a
    // presence bit for each recognized field and preserving unrecognized ones.
    private SegmentStateProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:  // tag 0 signals end of the message
              done = true;
              break;
            default: {  // unrecognized tag: keep it in unknownFields
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {  // field 1 (uint64 startTxId), varint wire type
              bitField0_ |= 0x00000001;
              startTxId_ = input.readUInt64();
              break;
            }
            case 16: {  // field 2 (uint64 endTxId), varint wire type
              bitField0_ |= 0x00000002;
              endTxId_ = input.readUInt64();
              break;
            }
            case 24: {  // field 3 (bool isInProgress), varint wire type
              bitField0_ |= 0x00000004;
              isInProgress_ = input.readBool();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on parse failure so the partial message attached to the
        // thrown exception carries a well-formed unknown-field set.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
    }

    // Maps the generated accessors onto descriptor fields for reflection support.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
    }

    public static com.google.protobuf.Parser<SegmentStateProto> PARSER =
        new com.google.protobuf.AbstractParser<SegmentStateProto>() {
      public SegmentStateProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new SegmentStateProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<SegmentStateProto> getParserForType() {
      return PARSER;
    }

    // Field-presence bits: 0x1 = startTxId, 0x2 = endTxId, 0x4 = isInProgress.
    private int bitField0_;
    // required uint64 startTxId = 1;
    public static final int STARTTXID_FIELD_NUMBER = 1;
    private long startTxId_;
    /**
     * <code>required uint64 startTxId = 1;</code>
     */
    public boolean hasStartTxId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint64 startTxId = 1;</code>
     */
    public long getStartTxId() {
      return startTxId_;
    }

    // required uint64 endTxId = 2;
    public static final int ENDTXID_FIELD_NUMBER = 2;
    private long endTxId_;
    /**
     * <code>required uint64 endTxId = 2;</code>
     */
    public boolean hasEndTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 endTxId = 2;</code>
     */
    public long getEndTxId() {
      return endTxId_;
    }

    // required bool isInProgress = 3;
    public static final int ISINPROGRESS_FIELD_NUMBER = 3;
    private boolean isInProgress_;
    /**
     * <code>required bool isInProgress = 3;</code>
     */
    public boolean hasIsInProgress() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required bool isInProgress = 3;</code>
     */
    public boolean getIsInProgress() {
      return isInProgress_;
    }

    // Resets every field to its proto default value.
    private void initFields() {
      startTxId_ = 0L;
      endTxId_ = 0L;
      isInProgress_ = false;
    }
    // Memoized isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // All three fields are declared 'required', so each must be present.
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsInProgress()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    // Serializes only the fields whose presence bit is set, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, startTxId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, endTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBool(3, isInProgress_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, startTxId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, endTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(3, isInProgress_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Equal iff every field has matching presence and, when present, matching
    // value, and the unknown-field sets are equal.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;

      boolean result = true;
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result && (hasIsInProgress() == other.hasIsInProgress());
      if (hasIsInProgress()) {
        result = result && (getIsInProgress()
            == other.getIsInProgress());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      // Only present fields contribute, mixing in the field number each time.
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      if (hasIsInProgress()) {
        hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getIsInProgress());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Convenience parsers; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No-op for this message: there are no nested-message builders to pre-create.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets every field to its default value and clears its presence bit.
      public Builder clear() {
        super.clear();
        startTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        endTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        isInProgress_ = false;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      }

      // Like buildPartial(), but rejects messages missing required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Copies builder state into a new message, translating this builder's
      // presence bits into the message's bitField0_.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.startTxId_ = startTxId_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.endTxId_ = endTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.isInProgress_ = isInProgress_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-by-field merge: only fields present in 'other' overwrite this builder.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
        if (other.hasStartTxId()) {
          setStartTxId(other.getStartTxId());
        }
        if (other.hasEndTxId()) {
          setEndTxId(other.getEndTxId());
        }
        if (other.hasIsInProgress()) {
          setIsInProgress(other.getIsInProgress());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Unmemoized check that every required field has been set.
      public final boolean isInitialized() {
        if (!hasStartTxId()) {
          
          return false;
        }
        if (!hasEndTxId()) {
          
          return false;
        }
        if (!hasIsInProgress()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Keep whatever was parsed before the failure, then rethrow.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits for this builder (same layout as the message's bitField0_).
      private int bitField0_;

      // required uint64 startTxId = 1;
      private long startTxId_ ;
      /**
       * <code>required uint64 startTxId = 1;</code>
       */
      public boolean hasStartTxId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 startTxId = 1;</code>
       */
      public long getStartTxId() {
        return startTxId_;
      }
      /**
       * <code>required uint64 startTxId = 1;</code>
       */
      public Builder setStartTxId(long value) {
        bitField0_ |= 0x00000001;
        startTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 startTxId = 1;</code>
       */
      public Builder clearStartTxId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        startTxId_ = 0L;
        onChanged();
        return this;
      }

      // required uint64 endTxId = 2;
      private long endTxId_ ;
      /**
       * <code>required uint64 endTxId = 2;</code>
       */
      public boolean hasEndTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 endTxId = 2;</code>
       */
      public long getEndTxId() {
        return endTxId_;
      }
      /**
       * <code>required uint64 endTxId = 2;</code>
       */
      public Builder setEndTxId(long value) {
        bitField0_ |= 0x00000002;
        endTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 endTxId = 2;</code>
       */
      public Builder clearEndTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        endTxId_ = 0L;
        onChanged();
        return this;
      }

      // required bool isInProgress = 3;
      private boolean isInProgress_ ;
      /**
       * <code>required bool isInProgress = 3;</code>
       */
      public boolean hasIsInProgress() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required bool isInProgress = 3;</code>
       */
      public boolean getIsInProgress() {
        return isInProgress_;
      }
      /**
       * <code>required bool isInProgress = 3;</code>
       */
      public Builder setIsInProgress(boolean value) {
        bitField0_ |= 0x00000004;
        isInProgress_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isInProgress = 3;</code>
       */
      public Builder clearIsInProgress() {
        bitField0_ = (bitField0_ & ~0x00000004);
        isInProgress_ = false;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SegmentStateProto)
    }

    // Eagerly create and initialize the shared default instance at class-load time.
    static {
      defaultInstance = new SegmentStateProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SegmentStateProto)
  }
2076    
  /**
   * Accessor interface implemented by both {@code PersistedRecoveryPaxosData}
   * and its Builder: the accepted segment state and the epoch in which it was
   * accepted. Both fields are required.
   */
  public interface PersistedRecoveryPaxosDataOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    boolean hasSegmentState();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();

    // required uint64 acceptedInEpoch = 2;
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    boolean hasAcceptedInEpoch();
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    long getAcceptedInEpoch();
  }
2104      /**
2105       * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2106       *
2107       * <pre>
2108       **
2109       * The storage format used on local disk for previously
2110       * accepted decisions.
2111       * </pre>
2112       */
2113      public static final class PersistedRecoveryPaxosData extends
2114          com.google.protobuf.GeneratedMessage
2115          implements PersistedRecoveryPaxosDataOrBuilder {
    // Use PersistedRecoveryPaxosData.newBuilder() to construct.
    private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the singleton default instance.
    private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2122    
    // Singleton default instance; created and initialized in this class's static block.
    private static final PersistedRecoveryPaxosData defaultInstance;
    public static PersistedRecoveryPaxosData getDefaultInstance() {
      return defaultInstance;
    }

    public PersistedRecoveryPaxosData getDefaultInstanceForType() {
      return defaultInstance;
    }
2131    
    // Fields seen on the wire that are not part of this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: consumes the stream until end-of-message, merging
    // repeated occurrences of segmentState and preserving unrecognized fields.
    private PersistedRecoveryPaxosData(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:  // tag 0 signals end of the message
              done = true;
              break;
            default: {  // unrecognized tag: keep it in unknownFields
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {  // field 1 (message segmentState), length-delimited
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // Field seen before: merge the new value into the old one.
                subBuilder = segmentState_.toBuilder();
              }
              segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(segmentState_);
                segmentState_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {  // field 2 (uint64 acceptedInEpoch), varint wire type
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on parse failure so the partial message attached to the
        // thrown exception carries a well-formed unknown-field set.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Returns the descriptor for {@code hadoop.hdfs.PersistedRecoveryPaxosData}. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
    }

    // Binds the descriptor's field-accessor table to this class and its
    // Builder for reflective access.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
    }
2202    
    // Shared parser delegating to the private wire-format constructor.
    // (Generated as a public non-final field by this protoc version; do not
    // reassign.)
    public static com.google.protobuf.Parser<PersistedRecoveryPaxosData> PARSER =
        new com.google.protobuf.AbstractParser<PersistedRecoveryPaxosData>() {
      public PersistedRecoveryPaxosData parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PersistedRecoveryPaxosData(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PersistedRecoveryPaxosData> getParserForType() {
      return PARSER;
    }
2217    
    // Presence bitmask: bit 0 = segmentState, bit 1 = acceptedInEpoch.
    private int bitField0_;
    // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }

    // required uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }
2256    
    /** Resets both fields to their proto defaults (called from constructors). */
    private void initFields() {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      acceptedInEpoch_ = 0L;
    }
    // Memoized result: -1 = not computed, 0 = not initialized, 1 = initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are declared `required`; the nested message must also be
      // recursively initialized.
      if (!hasSegmentState()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasAcceptedInEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getSegmentState().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
2281    
    /** Serializes the set fields (plus unknown fields) in field-number order. */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces the size to be memoized before writing, matching protobuf's
      // generated-code convention.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, acceptedInEpoch_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, acceptedInEpoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
2312    
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's
    // serialized-form replacement.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
2319    
    /** Field-wise equality: presence flags, field values, and unknown fields. */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;

      boolean result = true;
      result = result && (hasSegmentState() == other.hasSegmentState());
      if (hasSegmentState()) {
        result = result && getSegmentState()
            .equals(other.getSegmentState());
      }
      result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
      if (hasAcceptedInEpoch()) {
        result = result && (getAcceptedInEpoch()
            == other.getAcceptedInEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 doubles as the "not yet computed" sentinel (a real
    // hash of 0 is simply recomputed each call).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSegmentState()) {
        hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
        hash = (53 * hash) + getSegmentState().hashCode();
      }
      if (hasAcceptedInEpoch()) {
        hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAcceptedInEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
2366    
    // Static parse entry points; every overload delegates to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length prefix before the message.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
2419    
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a new builder pre-populated from an existing message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Invoked by the runtime when this message is built as a nested field of
    // a parent builder.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
2433        /**
2434         * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2435         *
2436         * <pre>
2437         **
2438         * The storage format used on local disk for previously
2439         * accepted decisions.
2440         * </pre>
2441         */
    /**
     * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
     *
     * <pre>
     **
     * The storage format used on local disk for previously
     * accepted decisions.
     * </pre>
     *
     * Mutable builder for {@code PersistedRecoveryPaxosData}. Field presence
     * is tracked in {@code bitField0_} (bit 0 = segmentState,
     * bit 1 = acceptedInEpoch); the nested segmentState field may be held
     * either directly in {@code segmentState_} or, once
     * {@link #getSegmentStateBuilder()} has been called, in the lazily
     * created {@code segmentStateBuilder_}.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested-field builders when the runtime requests it
      // (alwaysUseFieldBuilders is set in some runtime configurations).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSegmentStateFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      /** Resets all fields to defaults and clears both presence bits. */
      public Builder clear() {
        super.clear();
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        acceptedInEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
      }

      /**
       * Builds the message, throwing if any required field is unset
       * (see {@link #isInitialized()}).
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds without the required-fields check, copying the builder's
       * presence bits into the message's {@code bitField0_}.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (segmentStateBuilder_ == null) {
          result.segmentState_ = segmentState_;
        } else {
          result.segmentState_ = segmentStateBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.acceptedInEpoch_ = acceptedInEpoch_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
        } else {
          // Fall back to reflection-based merge for other message types.
          super.mergeFrom(other);
          return this;
        }
      }

      /** Merges only the fields that are set on {@code other}. */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
        if (other.hasSegmentState()) {
          mergeSegmentState(other.getSegmentState());
        }
        if (other.hasAcceptedInEpoch()) {
          setAcceptedInEpoch(other.getAcceptedInEpoch());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Unlike the message's isInitialized(), this is recomputed on every
      // call (builders are mutable, so the result cannot be memoized).
      public final boolean isInitialized() {
        if (!hasSegmentState()) {
          
          return false;
        }
        if (!hasAcceptedInEpoch()) {
          
          return false;
        }
        if (!getSegmentState().isInitialized()) {
          
          return false;
        }
        return true;
      }

      // Parses from a stream; on failure, any partially parsed message is
      // still merged in (via the finally block) before rethrowing.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits: bit 0 = segmentState, bit 1 = acceptedInEpoch.
      private int bitField0_;

      // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      // Lazily created; once non-null it owns the field value and
      // segmentState_ is nulled out (see getSegmentStateFieldBuilder()).
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public boolean hasSegmentState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
        if (segmentStateBuilder_ == null) {
          return segmentState_;
        } else {
          return segmentStateBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          segmentState_ = value;
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (segmentStateBuilder_ == null) {
          segmentState_ = builderForValue.build();
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          // Merge only when a non-default value is already present;
          // otherwise adopt the incoming value wholesale.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            segmentState_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
          } else {
            segmentState_ = value;
          }
          onChanged();
        } else {
          segmentStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder clearSegmentState() {
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
        // Marks the field present: obtaining the builder counts as setting it.
        bitField0_ |= 0x00000001;
        onChanged();
        return getSegmentStateFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
        if (segmentStateBuilder_ != null) {
          return segmentStateBuilder_.getMessageOrBuilder();
        } else {
          return segmentState_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getSegmentStateFieldBuilder() {
        if (segmentStateBuilder_ == null) {
          segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  segmentState_,
                  getParentForChildren(),
                  isClean());
          // Ownership transfers to the builder; drop the direct reference.
          segmentState_ = null;
        }
        return segmentStateBuilder_;
      }

      // required uint64 acceptedInEpoch = 2;
      private long acceptedInEpoch_ ;
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public boolean hasAcceptedInEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public long getAcceptedInEpoch() {
        return acceptedInEpoch_;
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public Builder setAcceptedInEpoch(long value) {
        bitField0_ |= 0x00000002;
        acceptedInEpoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public Builder clearAcceptedInEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        acceptedInEpoch_ = 0L;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
    }
2739    
    static {
      // Create and normalize the singleton default instance at class load,
      // after PARSER and the other statics above are initialized.
      defaultInstance = new PersistedRecoveryPaxosData(true);
      defaultInstance.initFields();
    }
2744    
2745        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2746      }
2747    
  /**
   * Accessor contract for {@code hadoop.hdfs.JournalRequestProto}:
   * a presence check and getter pair for each declared field.
   */
  public interface JournalRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 firstTxnId = 2;
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    boolean hasFirstTxnId();
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    long getFirstTxnId();

    // required uint32 numTxns = 3;
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    boolean hasNumTxns();
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    int getNumTxns();

    // required bytes records = 4;
    /**
     * <code>required bytes records = 4;</code>
     */
    boolean hasRecords();
    /**
     * <code>required bytes records = 4;</code>
     */
    com.google.protobuf.ByteString getRecords();

    // required uint64 segmentTxnId = 5;
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    boolean hasSegmentTxnId();
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    long getSegmentTxnId();
  }
2805      /**
2806       * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
2807       */
2808      public static final class JournalRequestProto extends
2809          com.google.protobuf.GeneratedMessage
2810          implements JournalRequestProtoOrBuilder {
    // Use JournalRequestProto.newBuilder() to construct.
    private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the singleton default instance; the noInit flag is a
    // marker parameter only (its value is unused).
    private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2817    
    // Singleton default instance, assigned in this class's static initializer.
    private static final JournalRequestProto defaultInstance;
    public static JournalRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
2826    
    // Unrecognized fields captured during wire parsing.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: consumes tags from `input` until
    // end-of-stream (tag 0), setting the matching presence bit for each field
    // read and accumulating unrecognized fields in `unknownFields`.
    private JournalRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          // Each tag encodes (fieldNumber << 3) | wireType; e.g. 10 is
          // field 1 with wire type 2 (length-delimited).
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            // NOTE: `default` appearing before the field cases is legal Java;
            // case position in a switch does not affect dispatch.
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1: reqInfo. If a value was already parsed, merge into it
              // per protobuf last-message-merges semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2: firstTxnId (uint64).
              bitField0_ |= 0x00000002;
              firstTxnId_ = input.readUInt64();
              break;
            }
            case 24: {
              // Field 3: numTxns (uint32).
              bitField0_ |= 0x00000004;
              numTxns_ = input.readUInt32();
              break;
            }
            case 34: {
              // Field 4: records (bytes).
              bitField0_ |= 0x00000008;
              records_ = input.readBytes();
              break;
            }
            case 40: {
              // Field 5: segmentTxnId (uint64).
              bitField0_ |= 0x00000010;
              segmentTxnId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed — even on error — so the partial
        // message attached to the thrown exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support: the descriptor and field-accessor table are defined
    // on the enclosing QJournalProtocolProtos outer class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
    }
2912    
    // Parser singleton; delegates to the wire-format constructor above.
    // (Non-final public static is the protoc-2.5 generated convention.)
    public static com.google.protobuf.Parser<JournalRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalRequestProto>() {
      public JournalRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalRequestProto> getParserForType() {
      return PARSER;
    }
2927    
    // Presence bits: 0x1=reqInfo, 0x2=firstTxnId, 0x4=numTxns, 0x8=records,
    // 0x10=segmentTxnId.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 firstTxnId = 2;
    public static final int FIRSTTXNID_FIELD_NUMBER = 2;
    private long firstTxnId_;
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    public boolean hasFirstTxnId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    public long getFirstTxnId() {
      return firstTxnId_;
    }

    // required uint32 numTxns = 3;
    public static final int NUMTXNS_FIELD_NUMBER = 3;
    private int numTxns_;
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    public boolean hasNumTxns() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    public int getNumTxns() {
      return numTxns_;
    }

    // required bytes records = 4;
    public static final int RECORDS_FIELD_NUMBER = 4;
    private com.google.protobuf.ByteString records_;
    /**
     * <code>required bytes records = 4;</code>
     */
    public boolean hasRecords() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>required bytes records = 4;</code>
     */
    public com.google.protobuf.ByteString getRecords() {
      return records_;
    }

    // required uint64 segmentTxnId = 5;
    public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
    private long segmentTxnId_;
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    public boolean hasSegmentTxnId() {
      return ((bitField0_ & 0x00000010) == 0x00000010);
    }
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    public long getSegmentTxnId() {
      return segmentTxnId_;
    }
3014    
    // Resets every field to its protobuf default; invoked by the parsing
    // constructor before any wire data is read.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      firstTxnId_ = 0L;
      numTxns_ = 0;
      records_ = com.google.protobuf.ByteString.EMPTY;
      segmentTxnId_ = 0L;
    }
    // Memoized initialization check: -1 = unknown, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // True only when all five required fields are present and the nested
    // reqInfo message is itself fully initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFirstTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumTxns()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasRecords()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
3054    
    // Serializes set fields in ascending field-number order, then any unknown
    // fields. getSerializedSize() is called first to populate the memoized
    // size used by nested-message length prefixes.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBytes(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        output.writeUInt64(5, segmentTxnId_);
      }
      getUnknownFields().writeTo(output);
    }
3075    
    // Memoized serialized size (-1 = not yet computed); safe to cache because
    // the message is immutable. Mirrors the field order in writeTo().
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(5, segmentTxnId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
3106    
    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to the GeneratedMessage superclass.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
3113    
    // Value equality: same presence bits, equal field values, and equal
    // unknown-field sets.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasFirstTxnId() == other.hasFirstTxnId());
      if (hasFirstTxnId()) {
        result = result && (getFirstTxnId()
            == other.getFirstTxnId());
      }
      result = result && (hasNumTxns() == other.hasNumTxns());
      if (hasNumTxns()) {
        result = result && (getNumTxns()
            == other.getNumTxns());
      }
      result = result && (hasRecords() == other.hasRecords());
      if (hasRecords()) {
        result = result && getRecords()
            .equals(other.getRecords());
      }
      result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
      if (hasSegmentTxnId()) {
        result = result && (getSegmentTxnId()
            == other.getSegmentTxnId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
3154    
    // Memoized hash (0 = not yet computed). Mixes only fields whose presence
    // bit is set, keeping hashCode consistent with equals above.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasFirstTxnId()) {
        hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getFirstTxnId());
      }
      if (hasNumTxns()) {
        hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
        hash = (53 * hash) + getNumTxns();
      }
      if (hasRecords()) {
        hash = (37 * hash) + RECORDS_FIELD_NUMBER;
        hash = (53 * hash) + getRecords().hashCode();
      }
      if (hasSegmentTxnId()) {
        hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxnId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
3187    
    // Static parse entry points; all delegate to the PARSER singleton.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
3240    
    // Builder factories: newBuilder(prototype) seeds a builder with an
    // existing message; toBuilder() is the instance-side equivalent.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
3254        /**
3255         * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
3256         */
3257        public static final class Builder extends
3258            com.google.protobuf.GeneratedMessage.Builder<Builder>
3259           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
      // Reflection support: same descriptor/accessor table as the message.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
      }
3271    
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested-field builder when the runtime requires
      // field builders (alwaysUseFieldBuilders is a GeneratedMessage flag).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
3290    
      // Resets every field to its default and clears all presence bits.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        firstTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        numTxns_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        records_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000008);
        segmentTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }
3309    
      // Deep copy via round-trip through a partially-built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
      }
3322    
      // Builds the message, throwing if any required field is missing;
      // use buildPartial() to skip the initialization check.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
3330    
      // Copies field values and presence bits from the builder into a new
      // message without validating required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        // reqInfo may live either in the plain field or in its field builder.
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.firstTxnId_ = firstTxnId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.numTxns_ = numTxns_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.records_ = records_;
        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
          to_bitField0_ |= 0x00000010;
        }
        result.segmentTxnId_ = segmentTxnId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
3363    
      // Dispatches to the typed overload when possible; otherwise falls back
      // to reflection-based merging in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
3372    
      // Merges only the fields that are set on `other`; the default instance
      // is a no-op by identity comparison.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasFirstTxnId()) {
          setFirstTxnId(other.getFirstTxnId());
        }
        if (other.hasNumTxns()) {
          setNumTxns(other.getNumTxns());
        }
        if (other.hasRecords()) {
          setRecords(other.getRecords());
        }
        if (other.hasSegmentTxnId()) {
          setSegmentTxnId(other.getSegmentTxnId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
3393    
      // Same required-field checks as the message's isInitialized(), but
      // unmemoized since builder state is mutable.
      public final boolean isInitialized() {
        if (!hasReqInfo()) {

          return false;
        }
        if (!hasFirstTxnId()) {

          return false;
        }
        if (!hasNumTxns()) {

          return false;
        }
        if (!hasRecords()) {

          return false;
        }
        if (!hasSegmentTxnId()) {

          return false;
        }
        if (!getReqInfo().isInitialized()) {

          return false;
        }
        return true;
      }
3421    
      // Parses from the stream and merges into this builder; on parse failure
      // the partially-parsed message is still merged (finally block) before
      // the exception is rethrown.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // Storage is dual-mode: while reqInfoBuilder_ is null the plain field
      // reqInfo_ holds the value; after getReqInfoFieldBuilder() is first
      // called, the SingleFieldBuilder owns the value and reqInfo_ is nulled.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>Merges into any existing value (unless it is still the default
       * instance, in which case the new value simply replaces it).
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>Marks the field present and hands out a mutable nested builder.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>Lazily creates the SingleFieldBuilder, transferring ownership of
       * the current value from reqInfo_ into it.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
3557    
      // required uint64 firstTxnId = 2;
      // Backing field; presence tracked by bit 0x00000002 of bitField0_.
      private long firstTxnId_ ;
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      public boolean hasFirstTxnId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      public long getFirstTxnId() {
        return firstTxnId_;
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       *
       * Sets the value and its presence bit.
       */
      public Builder setFirstTxnId(long value) {
        bitField0_ |= 0x00000002;
        firstTxnId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       *
       * Clears the presence bit and restores the proto default (0).
       */
      public Builder clearFirstTxnId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        firstTxnId_ = 0L;
        onChanged();
        return this;
      }
3590    
      // required uint32 numTxns = 3;
      // Backing field; presence tracked by bit 0x00000004 of bitField0_.
      private int numTxns_ ;
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      public boolean hasNumTxns() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      public int getNumTxns() {
        return numTxns_;
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       *
       * Sets the value and its presence bit.
       */
      public Builder setNumTxns(int value) {
        bitField0_ |= 0x00000004;
        numTxns_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       *
       * Clears the presence bit and restores the proto default (0).
       */
      public Builder clearNumTxns() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numTxns_ = 0;
        onChanged();
        return this;
      }
3623    
      // required bytes records = 4;
      // Backing field; presence tracked by bit 0x00000008 of bitField0_.
      private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes records = 4;</code>
       */
      public boolean hasRecords() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>required bytes records = 4;</code>
       */
      public com.google.protobuf.ByteString getRecords() {
        return records_;
      }
      /**
       * <code>required bytes records = 4;</code>
       *
       * Rejects null. (The irregular indentation below is emitted verbatim by
       * the protobuf 2.5 code generator.)
       */
      public Builder setRecords(com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;
        records_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes records = 4;</code>
       *
       * Clears the presence bit and restores the default (empty) ByteString.
       */
      public Builder clearRecords() {
        bitField0_ = (bitField0_ & ~0x00000008);
        records_ = getDefaultInstance().getRecords();
        onChanged();
        return this;
      }
3659    
      // required uint64 segmentTxnId = 5;
      // Backing field; presence tracked by bit 0x00000010 of bitField0_.
      private long segmentTxnId_ ;
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      public boolean hasSegmentTxnId() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      public long getSegmentTxnId() {
        return segmentTxnId_;
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       *
       * Sets the value and its presence bit.
       */
      public Builder setSegmentTxnId(long value) {
        bitField0_ |= 0x00000010;
        segmentTxnId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       *
       * Clears the presence bit and restores the proto default (0).
       */
      public Builder clearSegmentTxnId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        segmentTxnId_ = 0L;
        onChanged();
        return this;
      }
3692    
3693          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalRequestProto)
3694        }
3695    
    // Creates the singleton default instance when the class is loaded.
    static {
      defaultInstance = new JournalRequestProto(true);
      defaultInstance.initFields();
    }
3700    
3701        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalRequestProto)
3702      }
3703    
  /**
   * Accessor interface for {@code hadoop.hdfs.JournalResponseProto}; the
   * message declares no fields, so no accessors are defined.
   */
  public interface JournalResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
3707      /**
3708       * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3709       */
3710      public static final class JournalResponseProto extends
3711          com.google.protobuf.GeneratedMessage
3712          implements JournalResponseProtoOrBuilder {
3713        // Use JournalResponseProto.newBuilder() to construct.
3714        private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
3715          super(builder);
3716          this.unknownFields = builder.getUnknownFields();
3717        }
3718        private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
3719    
3720        private static final JournalResponseProto defaultInstance;
3721        public static JournalResponseProto getDefaultInstance() {
3722          return defaultInstance;
3723        }
3724    
3725        public JournalResponseProto getDefaultInstanceForType() {
3726          return defaultInstance;
3727        }
3728    
3729        private final com.google.protobuf.UnknownFieldSet unknownFields;
3730        @java.lang.Override
3731        public final com.google.protobuf.UnknownFieldSet
3732            getUnknownFields() {
3733          return this.unknownFields;
3734        }
3735        private JournalResponseProto(
3736            com.google.protobuf.CodedInputStream input,
3737            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3738            throws com.google.protobuf.InvalidProtocolBufferException {
3739          initFields();
3740          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3741              com.google.protobuf.UnknownFieldSet.newBuilder();
3742          try {
3743            boolean done = false;
3744            while (!done) {
3745              int tag = input.readTag();
3746              switch (tag) {
3747                case 0:
3748                  done = true;
3749                  break;
3750                default: {
3751                  if (!parseUnknownField(input, unknownFields,
3752                                         extensionRegistry, tag)) {
3753                    done = true;
3754                  }
3755                  break;
3756                }
3757              }
3758            }
3759          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3760            throw e.setUnfinishedMessage(this);
3761          } catch (java.io.IOException e) {
3762            throw new com.google.protobuf.InvalidProtocolBufferException(
3763                e.getMessage()).setUnfinishedMessage(this);
3764          } finally {
3765            this.unknownFields = unknownFields.build();
3766            makeExtensionsImmutable();
3767          }
3768        }
3769        public static final com.google.protobuf.Descriptors.Descriptor
3770            getDescriptor() {
3771          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3772        }
3773    
3774        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3775            internalGetFieldAccessorTable() {
3776          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3777              .ensureFieldAccessorsInitialized(
3778                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3779        }
3780    
3781        public static com.google.protobuf.Parser<JournalResponseProto> PARSER =
3782            new com.google.protobuf.AbstractParser<JournalResponseProto>() {
3783          public JournalResponseProto parsePartialFrom(
3784              com.google.protobuf.CodedInputStream input,
3785              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3786              throws com.google.protobuf.InvalidProtocolBufferException {
3787            return new JournalResponseProto(input, extensionRegistry);
3788          }
3789        };
3790    
3791        @java.lang.Override
3792        public com.google.protobuf.Parser<JournalResponseProto> getParserForType() {
3793          return PARSER;
3794        }
3795    
3796        private void initFields() {
3797        }
3798        private byte memoizedIsInitialized = -1;
3799        public final boolean isInitialized() {
3800          byte isInitialized = memoizedIsInitialized;
3801          if (isInitialized != -1) return isInitialized == 1;
3802    
3803          memoizedIsInitialized = 1;
3804          return true;
3805        }
3806    
3807        public void writeTo(com.google.protobuf.CodedOutputStream output)
3808                            throws java.io.IOException {
3809          getSerializedSize();
3810          getUnknownFields().writeTo(output);
3811        }
3812    
3813        private int memoizedSerializedSize = -1;
3814        public int getSerializedSize() {
3815          int size = memoizedSerializedSize;
3816          if (size != -1) return size;
3817    
3818          size = 0;
3819          size += getUnknownFields().getSerializedSize();
3820          memoizedSerializedSize = size;
3821          return size;
3822        }
3823    
3824        private static final long serialVersionUID = 0L;
3825        @java.lang.Override
3826        protected java.lang.Object writeReplace()
3827            throws java.io.ObjectStreamException {
3828          return super.writeReplace();
3829        }
3830    
3831        @java.lang.Override
3832        public boolean equals(final java.lang.Object obj) {
3833          if (obj == this) {
3834           return true;
3835          }
3836          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
3837            return super.equals(obj);
3838          }
3839          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
3840    
3841          boolean result = true;
3842          result = result &&
3843              getUnknownFields().equals(other.getUnknownFields());
3844          return result;
3845        }
3846    
3847        private int memoizedHashCode = 0;
3848        @java.lang.Override
3849        public int hashCode() {
3850          if (memoizedHashCode != 0) {
3851            return memoizedHashCode;
3852          }
3853          int hash = 41;
3854          hash = (19 * hash) + getDescriptorForType().hashCode();
3855          hash = (29 * hash) + getUnknownFields().hashCode();
3856          memoizedHashCode = hash;
3857          return hash;
3858        }
3859    
3860        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3861            com.google.protobuf.ByteString data)
3862            throws com.google.protobuf.InvalidProtocolBufferException {
3863          return PARSER.parseFrom(data);
3864        }
3865        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3866            com.google.protobuf.ByteString data,
3867            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3868            throws com.google.protobuf.InvalidProtocolBufferException {
3869          return PARSER.parseFrom(data, extensionRegistry);
3870        }
3871        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
3872            throws com.google.protobuf.InvalidProtocolBufferException {
3873          return PARSER.parseFrom(data);
3874        }
3875        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3876            byte[] data,
3877            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3878            throws com.google.protobuf.InvalidProtocolBufferException {
3879          return PARSER.parseFrom(data, extensionRegistry);
3880        }
3881        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
3882            throws java.io.IOException {
3883          return PARSER.parseFrom(input);
3884        }
3885        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3886            java.io.InputStream input,
3887            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3888            throws java.io.IOException {
3889          return PARSER.parseFrom(input, extensionRegistry);
3890        }
3891        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
3892            throws java.io.IOException {
3893          return PARSER.parseDelimitedFrom(input);
3894        }
3895        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
3896            java.io.InputStream input,
3897            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3898            throws java.io.IOException {
3899          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3900        }
3901        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3902            com.google.protobuf.CodedInputStream input)
3903            throws java.io.IOException {
3904          return PARSER.parseFrom(input);
3905        }
3906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3907            com.google.protobuf.CodedInputStream input,
3908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3909            throws java.io.IOException {
3910          return PARSER.parseFrom(input, extensionRegistry);
3911        }
3912    
3913        public static Builder newBuilder() { return Builder.create(); }
3914        public Builder newBuilderForType() { return newBuilder(); }
3915        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
3916          return newBuilder().mergeFrom(prototype);
3917        }
3918        public Builder toBuilder() { return newBuilder(this); }
3919    
3920        @java.lang.Override
3921        protected Builder newBuilderForType(
3922            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3923          Builder builder = new Builder(parent);
3924          return builder;
3925        }
3926        /**
3927         * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3928         */
3929        public static final class Builder extends
3930            com.google.protobuf.GeneratedMessage.Builder<Builder>
3931           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
3932          public static final com.google.protobuf.Descriptors.Descriptor
3933              getDescriptor() {
3934            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3935          }
3936    
3937          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3938              internalGetFieldAccessorTable() {
3939            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3940                .ensureFieldAccessorsInitialized(
3941                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3942          }
3943    
3944          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
3945          private Builder() {
3946            maybeForceBuilderInitialization();
3947          }
3948    
3949          private Builder(
3950              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3951            super(parent);
3952            maybeForceBuilderInitialization();
3953          }
3954          private void maybeForceBuilderInitialization() {
3955            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3956            }
3957          }
3958          private static Builder create() {
3959            return new Builder();
3960          }
3961    
3962          public Builder clear() {
3963            super.clear();
3964            return this;
3965          }
3966    
3967          public Builder clone() {
3968            return create().mergeFrom(buildPartial());
3969          }
3970    
3971          public com.google.protobuf.Descriptors.Descriptor
3972              getDescriptorForType() {
3973            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3974          }
3975    
3976          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
3977            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
3978          }
3979    
3980          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
3981            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3982            if (!result.isInitialized()) {
3983              throw newUninitializedMessageException(result);
3984            }
3985            return result;
3986          }
3987    
3988          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
3989            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
3990            onBuilt();
3991            return result;
3992          }
3993    
3994          public Builder mergeFrom(com.google.protobuf.Message other) {
3995            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
3996              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
3997            } else {
3998              super.mergeFrom(other);
3999              return this;
4000            }
4001          }
4002    
4003          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
4004            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
4005            this.mergeUnknownFields(other.getUnknownFields());
4006            return this;
4007          }
4008    
4009          public final boolean isInitialized() {
4010            return true;
4011          }
4012    
4013          public Builder mergeFrom(
4014              com.google.protobuf.CodedInputStream input,
4015              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4016              throws java.io.IOException {
4017            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null;
4018            try {
4019              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4020            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4021              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage();
4022              throw e;
4023            } finally {
4024              if (parsedMessage != null) {
4025                mergeFrom(parsedMessage);
4026              }
4027            }
4028            return this;
4029          }
4030    
4031          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalResponseProto)
4032        }
4033    
4034        static {
4035          defaultInstance = new JournalResponseProto(true);
4036          defaultInstance.initFields();
4037        }
4038    
4039        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalResponseProto)
4040      }
4041    
  /**
   * Accessor interface for {@code hadoop.hdfs.HeartbeatRequestProto}.
   */
  public interface HeartbeatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
  }
4059      /**
4060       * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4061       */
4062      public static final class HeartbeatRequestProto extends
4063          com.google.protobuf.GeneratedMessage
4064          implements HeartbeatRequestProtoOrBuilder {
    // Use HeartbeatRequestProto.newBuilder() to construct.
    private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only to create the singleton default instance.
    private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4071    
    // Singleton default instance, assigned in the class static initializer.
    private static final HeartbeatRequestProto defaultInstance;
    public static HeartbeatRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
4080    
    // Unknown fields preserved during parsing, exposed read-only below.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: reads the message from the wire, merging repeated
    // occurrences of reqInfo (field 1) and preserving unknown fields.
    private HeartbeatRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // End of stream.
              done = true;
              break;
            // NOTE(review): the default label precedes case 10; Java matches
            // exact case labels first, so dispatch is unaffected by ordering.
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // reqInfo seen before: merge this occurrence into it.
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze unknown fields, even on error, so the partial
        // message attached to the exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
    }

    // Binds this class and its Builder to the descriptor for reflection.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
    }
4146    
    // Wire parser: delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
      public HeartbeatRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
      return PARSER;
    }
4161    
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    // Backing field; presence tracked by bit 0x00000001 of bitField0_.
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // Resets every field to its proto default (reqInfo -> default instance).
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
    }
    // Cached tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // reqInfo is required: it must be present and itself initialized.
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
4204    
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Populates the memoized size before serializing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    // Computes (and memoizes) the wire size: reqInfo if set + unknown fields.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
4228    
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's proto-based
    // serialized form rather than serializing fields directly.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
4235    
    /**
     * Value equality: two HeartbeatRequestProto messages are equal when the
     * reqInfo field has the same presence and value and their unknown field
     * sets are equal.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code: 0 = not yet computed (recomputation is harmless).
    private int memoizedHashCode = 0;
    /**
     * Hash code consistent with {@link #equals}: mixes the descriptor, the
     * reqInfo field (when present, keyed by its field number) and the
     * unknown field set. Cached because the message is immutable.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
4273    
    // ---------------------------------------------------------------------
    // Static parse entry points. All overloads delegate to PARSER; the
    // ByteString/byte[] variants throw InvalidProtocolBufferException on
    // malformed or truncated input, the stream variants additionally
    // propagate I/O errors. The "Delimited" variants read a varint length
    // prefix before the message body.
    // ---------------------------------------------------------------------
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
4326    
    // Creates a fresh Builder with all fields cleared.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Creates a Builder pre-populated with a copy of the given prototype's fields.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    // Creates a Builder initialized from this message's current field values.
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builds a Builder wired to a parent for change
    // notification (used by nested-builder plumbing).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
4340        /**
4341         * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4342         */
4343        public static final class Builder extends
4344            com.google.protobuf.GeneratedMessage.Builder<Builder>
4345           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
4346          public static final com.google.protobuf.Descriptors.Descriptor
4347              getDescriptor() {
4348            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4349          }
4350    
4351          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4352              internalGetFieldAccessorTable() {
4353            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
4354                .ensureFieldAccessorsInitialized(
4355                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
4356          }
4357    
4358          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
4359          private Builder() {
4360            maybeForceBuilderInitialization();
4361          }
4362    
4363          private Builder(
4364              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4365            super(parent);
4366            maybeForceBuilderInitialization();
4367          }
4368          private void maybeForceBuilderInitialization() {
4369            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4370              getReqInfoFieldBuilder();
4371            }
4372          }
4373          private static Builder create() {
4374            return new Builder();
4375          }
4376    
4377          public Builder clear() {
4378            super.clear();
4379            if (reqInfoBuilder_ == null) {
4380              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4381            } else {
4382              reqInfoBuilder_.clear();
4383            }
4384            bitField0_ = (bitField0_ & ~0x00000001);
4385            return this;
4386          }
4387    
4388          public Builder clone() {
4389            return create().mergeFrom(buildPartial());
4390          }
4391    
4392          public com.google.protobuf.Descriptors.Descriptor
4393              getDescriptorForType() {
4394            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4395          }
4396    
4397          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
4398            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
4399          }
4400    
4401          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
4402            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
4403            if (!result.isInitialized()) {
4404              throw newUninitializedMessageException(result);
4405            }
4406            return result;
4407          }
4408    
4409          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
4410            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
4411            int from_bitField0_ = bitField0_;
4412            int to_bitField0_ = 0;
4413            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4414              to_bitField0_ |= 0x00000001;
4415            }
4416            if (reqInfoBuilder_ == null) {
4417              result.reqInfo_ = reqInfo_;
4418            } else {
4419              result.reqInfo_ = reqInfoBuilder_.build();
4420            }
4421            result.bitField0_ = to_bitField0_;
4422            onBuilt();
4423            return result;
4424          }
4425    
4426          public Builder mergeFrom(com.google.protobuf.Message other) {
4427            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
4428              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
4429            } else {
4430              super.mergeFrom(other);
4431              return this;
4432            }
4433          }
4434    
4435          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
4436            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
4437            if (other.hasReqInfo()) {
4438              mergeReqInfo(other.getReqInfo());
4439            }
4440            this.mergeUnknownFields(other.getUnknownFields());
4441            return this;
4442          }
4443    
4444          public final boolean isInitialized() {
4445            if (!hasReqInfo()) {
4446              
4447              return false;
4448            }
4449            if (!getReqInfo().isInitialized()) {
4450              
4451              return false;
4452            }
4453            return true;
4454          }
4455    
4456          public Builder mergeFrom(
4457              com.google.protobuf.CodedInputStream input,
4458              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4459              throws java.io.IOException {
4460            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null;
4461            try {
4462              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4463            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4464              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
4465              throw e;
4466            } finally {
4467              if (parsedMessage != null) {
4468                mergeFrom(parsedMessage);
4469              }
4470            }
4471            return this;
4472          }
4473          private int bitField0_;
4474    
4475          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4476          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4477          private com.google.protobuf.SingleFieldBuilder<
4478              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
4479          /**
4480           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4481           */
4482          public boolean hasReqInfo() {
4483            return ((bitField0_ & 0x00000001) == 0x00000001);
4484          }
4485          /**
4486           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4487           */
4488          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4489            if (reqInfoBuilder_ == null) {
4490              return reqInfo_;
4491            } else {
4492              return reqInfoBuilder_.getMessage();
4493            }
4494          }
4495          /**
4496           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4497           */
4498          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4499            if (reqInfoBuilder_ == null) {
4500              if (value == null) {
4501                throw new NullPointerException();
4502              }
4503              reqInfo_ = value;
4504              onChanged();
4505            } else {
4506              reqInfoBuilder_.setMessage(value);
4507            }
4508            bitField0_ |= 0x00000001;
4509            return this;
4510          }
4511          /**
4512           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4513           */
4514          public Builder setReqInfo(
4515              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
4516            if (reqInfoBuilder_ == null) {
4517              reqInfo_ = builderForValue.build();
4518              onChanged();
4519            } else {
4520              reqInfoBuilder_.setMessage(builderForValue.build());
4521            }
4522            bitField0_ |= 0x00000001;
4523            return this;
4524          }
4525          /**
4526           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4527           */
4528          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4529            if (reqInfoBuilder_ == null) {
4530              if (((bitField0_ & 0x00000001) == 0x00000001) &&
4531                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
4532                reqInfo_ =
4533                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
4534              } else {
4535                reqInfo_ = value;
4536              }
4537              onChanged();
4538            } else {
4539              reqInfoBuilder_.mergeFrom(value);
4540            }
4541            bitField0_ |= 0x00000001;
4542            return this;
4543          }
4544          /**
4545           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4546           */
4547          public Builder clearReqInfo() {
4548            if (reqInfoBuilder_ == null) {
4549              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4550              onChanged();
4551            } else {
4552              reqInfoBuilder_.clear();
4553            }
4554            bitField0_ = (bitField0_ & ~0x00000001);
4555            return this;
4556          }
4557          /**
4558           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4559           */
4560          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
4561            bitField0_ |= 0x00000001;
4562            onChanged();
4563            return getReqInfoFieldBuilder().getBuilder();
4564          }
4565          /**
4566           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4567           */
4568          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4569            if (reqInfoBuilder_ != null) {
4570              return reqInfoBuilder_.getMessageOrBuilder();
4571            } else {
4572              return reqInfo_;
4573            }
4574          }
4575          /**
4576           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4577           */
4578          private com.google.protobuf.SingleFieldBuilder<
4579              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
4580              getReqInfoFieldBuilder() {
4581            if (reqInfoBuilder_ == null) {
4582              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
4583                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
4584                      reqInfo_,
4585                      getParentForChildren(),
4586                      isClean());
4587              reqInfo_ = null;
4588            }
4589            return reqInfoBuilder_;
4590          }
4591    
4592          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatRequestProto)
4593        }
4594    
    // Eagerly creates the singleton default instance at class-load time;
    // the boolean-arg constructor skips builder plumbing.
    static {
      defaultInstance = new HeartbeatRequestProto(true);
      defaultInstance.initFields();
    }
4599    
4600        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatRequestProto)
4601      }
4602    
  // Read-only view interface for HeartbeatResponseProto. The message has no
  // fields, so no accessors are declared.
  public interface HeartbeatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
4606      /**
4607       * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4608       *
4609       * <pre>
4610       * void response
4611       * </pre>
4612       */
4613      public static final class HeartbeatResponseProto extends
4614          com.google.protobuf.GeneratedMessage
4615          implements HeartbeatResponseProtoOrBuilder {
    // Use HeartbeatResponseProto.newBuilder() to construct.
    private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Lightweight constructor used only for the singleton default instance;
    // skips builder plumbing and installs an empty unknown-field set.
    private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4622    
    // Singleton default instance, created in the class's static initializer.
    private static final HeartbeatResponseProto defaultInstance;
    public static HeartbeatResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields seen on the wire that this schema does not define; preserved
    // so round-tripping does not lose data.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor. The message declares no fields, so
     * every non-zero tag is routed into the unknown-field set; tag 0 marks
     * end of input. The unknown fields are committed in the finally block
     * even when parsing fails partway.
     */
    private HeartbeatResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        // Attach the partially built message so callers can inspect it.
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support: descriptor and field accessor table generated for
    // hadoop.hdfs.HeartbeatResponseProto.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
    }
4683    
    // Parser used by all static parseFrom overloads.
    // NOTE(review): this field is public, static and non-final — a
    // protobuf-2.5 generator artifact; do not hand-edit generated code to
    // change it (regenerating with a newer protoc would).
    public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
      public HeartbeatResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
      return PARSER;
    }
4698    
    // No fields to initialize: the message body is empty.
    private void initFields() {
    }
    // Memoized result of isInitialized(): -1 = unknown, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // Always initialized — there are no required fields.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }
4709    
    // Serializes the message: only preserved unknown fields are written,
    // since the schema declares no fields of its own.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    // Memoized wire size: -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
4726    
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's proto-based
    // serialized form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
4733    
    /**
     * Value equality: with no declared fields, two instances are equal when
     * their unknown field sets are equal.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code: 0 = not yet computed.
    private int memoizedHashCode = 0;
    /**
     * Hash code consistent with {@link #equals}: mixes the descriptor and
     * the unknown field set.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
4762    
    // ---------------------------------------------------------------------
    // Static parse entry points; all delegate to PARSER. The "Delimited"
    // variants read a varint length prefix before the message body.
    // ---------------------------------------------------------------------
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
4815    
    // Creates a fresh Builder with all fields cleared.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Creates a Builder pre-populated from the given prototype.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    // Creates a Builder initialized from this message's current state.
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builds a Builder wired to a parent for change
    // notification (used by nested-builder plumbing).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
4829        /**
4830         * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4831         *
4832         * <pre>
4833         * void response
4834         * </pre>
4835         */
4836        public static final class Builder extends
4837            com.google.protobuf.GeneratedMessage.Builder<Builder>
4838           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
      // Reflection support for the builder: descriptor and field accessor
      // table for hadoop.hdfs.HeartbeatResponseProto.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
      }
4850    
4851          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
4852          private Builder() {
4853            maybeForceBuilderInitialization();
4854          }
4855    
4856          private Builder(
4857              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4858            super(parent);
4859            maybeForceBuilderInitialization();
4860          }
4861          private void maybeForceBuilderInitialization() {
4862            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4863            }
4864          }
4865          private static Builder create() {
4866            return new Builder();
4867          }
4868    
4869          public Builder clear() {
4870            super.clear();
4871            return this;
4872          }
4873    
4874          public Builder clone() {
4875            return create().mergeFrom(buildPartial());
4876          }
4877    
4878          public com.google.protobuf.Descriptors.Descriptor
4879              getDescriptorForType() {
4880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4881          }
4882    
4883          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
4884            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
4885          }
4886    
4887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
4888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
4889            if (!result.isInitialized()) {
4890              throw newUninitializedMessageException(result);
4891            }
4892            return result;
4893          }
4894    
4895          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
4896            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
4897            onBuilt();
4898            return result;
4899          }
4900    
4901          public Builder mergeFrom(com.google.protobuf.Message other) {
4902            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
4903              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
4904            } else {
4905              super.mergeFrom(other);
4906              return this;
4907            }
4908          }
4909    
4910          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
4911            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
4912            this.mergeUnknownFields(other.getUnknownFields());
4913            return this;
4914          }
4915    
4916          public final boolean isInitialized() {
4917            return true;
4918          }
4919    
4920          public Builder mergeFrom(
4921              com.google.protobuf.CodedInputStream input,
4922              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4923              throws java.io.IOException {
4924            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null;
4925            try {
4926              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4927            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4928              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
4929              throw e;
4930            } finally {
4931              if (parsedMessage != null) {
4932                mergeFrom(parsedMessage);
4933              }
4934            }
4935            return this;
4936          }
4937    
4938          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatResponseProto)
4939        }
4940    
    // Singleton default instance, created once at class-initialization time.
    static {
      defaultInstance = new HeartbeatResponseProto(true);
      defaultInstance.initFields();
    }
4945    
4946        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatResponseProto)
4947      }
4948    
  // Read-only accessor view implemented by both StartLogSegmentRequestProto
  // and its Builder.
  public interface StartLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 txid = 2;
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    boolean hasTxid();
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    long getTxid();
  }
4984      /**
4985       * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
4986       *
4987       * <pre>
4988       **
4989       * startLogSegment()
4990       * </pre>
4991       */
4992      public static final class StartLogSegmentRequestProto extends
4993          com.google.protobuf.GeneratedMessage
4994          implements StartLogSegmentRequestProtoOrBuilder {
4995        // Use StartLogSegmentRequestProto.newBuilder() to construct.
4996        private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4997          super(builder);
4998          this.unknownFields = builder.getUnknownFields();
4999        }
5000        private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5001    
5002        private static final StartLogSegmentRequestProto defaultInstance;
5003        public static StartLogSegmentRequestProto getDefaultInstance() {
5004          return defaultInstance;
5005        }
5006    
5007        public StartLogSegmentRequestProto getDefaultInstanceForType() {
5008          return defaultInstance;
5009        }
5010    
5011        private final com.google.protobuf.UnknownFieldSet unknownFields;
5012        @java.lang.Override
5013        public final com.google.protobuf.UnknownFieldSet
5014            getUnknownFields() {
5015          return this.unknownFields;
5016        }
    // Wire-format parsing constructor used by PARSER: reads tag/value pairs
    // until end of stream (tag 0), preserving unrecognized fields in
    // unknownFields.
    private StartLogSegmentRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // End of stream.
              done = true;
              break;
            default: {
              // Unknown tag: stash it, or stop if it is an end-group tag.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Tag 10 = field 1 (reqInfo), length-delimited. If reqInfo was
              // already set, merge the new occurrence into the existing value
              // per protobuf last-message-merge semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Tag 16 = field 2 (txid), varint uint64.
              bitField0_ |= 0x00000002;
              txid_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze parsed state, even on failure, so the partial message
        // attached to the exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
    }

    // Reflection support: maps descriptor fields to generated accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
    }

    // Stateless parser that delegates to the wire-parsing constructor above.
    public static com.google.protobuf.Parser<StartLogSegmentRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<StartLogSegmentRequestProto>() {
      public StartLogSegmentRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new StartLogSegmentRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<StartLogSegmentRequestProto> getParserForType() {
      return PARSER;
    }
5096    
    // Presence bits: 0x1 = reqInfo set, 0x2 = txid set.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 txid = 2;
    public static final int TXID_FIELD_NUMBER = 2;
    private long txid_;
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    public boolean hasTxid() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    public long getTxid() {
      return txid_;
    }
5143    
    // Resets fields to their proto defaults (empty message, 0).
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      txid_ = 0L;
    }
    // Memoized result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // True when both required fields are present and reqInfo is itself
    // fully initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTxid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
5168    
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, txid_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size: -1 = not computed yet.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, txid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
5199    
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: presence flags, set values, and unknown fields
    // must all match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasTxid() == other.hasTxid());
      if (hasTxid()) {
        result = result && (getTxid()
            == other.getTxid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
5232    
    // Memoized hash; 0 means not yet computed. Mixes descriptor, each set
    // field (tagged by its field number), and unknown fields.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasTxid()) {
        hash = (37 * hash) + TXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getTxid());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
5253    
    // Static parse entry points; all delegate to the shared PARSER instance.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
5306    
    // Creates a fresh, empty Builder for StartLogSegmentRequestProto.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Creates a Builder pre-populated by merging from the given prototype.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    // Converts this (immutable) message back into a mutable Builder.
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: creates a Builder attached to a parent so nested-builder
    // changes propagate up through the GeneratedMessage machinery.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
5320        /**
5321         * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
5322         *
5323         * <pre>
5324         **
5325         * startLogSegment()
5326         * </pre>
5327         */
5328        public static final class Builder extends
5329            com.google.protobuf.GeneratedMessage.Builder<Builder>
5330           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
      }

      // Reflection support: maps descriptor fields to generated accessors.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested reqInfo field builder when the runtime is
      // configured to always use field builders.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
5361    
      // Resets both fields to defaults and clears their presence bits.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        txid_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
      }

      // Builds, throwing if either required field (reqInfo, txid) is missing.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
5395    
      // Copies builder state into a new message without checking required
      // fields; presence bits are transferred from the builder's bitField0_.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        // reqInfo may live either inline or inside the nested field builder.
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.txid_ = txid_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
5416    
      // Typed dispatch: delegates to the strongly-typed overload when possible.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Merges only the fields that are set on `other`, plus unknown fields.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasTxid()) {
          setTxid(other.getTxid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Both fields are `required`, and reqInfo must itself be initialized.
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          
          return false;
        }
        if (!hasTxid()) {
          
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }
5453    
      // Parses from the wire; on failure, merges whatever was parsed before
      // rethrowing so the builder reflects the partial message.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits for builder state: 0x1 = reqInfo set, 0x2 = txid set.
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // reqInfo is held inline in reqInfo_ until getReqInfoBuilder() is
      // called, after which reqInfoBuilder_ becomes the source of truth.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
5493          /**
5494           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5495           */
5496          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5497            if (reqInfoBuilder_ == null) {
5498              if (value == null) {
5499                throw new NullPointerException();
5500              }
5501              reqInfo_ = value;
5502              onChanged();
5503            } else {
5504              reqInfoBuilder_.setMessage(value);
5505            }
5506            bitField0_ |= 0x00000001;
5507            return this;
5508          }
5509          /**
5510           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5511           */
5512          public Builder setReqInfo(
5513              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5514            if (reqInfoBuilder_ == null) {
5515              reqInfo_ = builderForValue.build();
5516              onChanged();
5517            } else {
5518              reqInfoBuilder_.setMessage(builderForValue.build());
5519            }
5520            bitField0_ |= 0x00000001;
5521            return this;
5522          }
5523          /**
5524           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5525           */
5526          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5527            if (reqInfoBuilder_ == null) {
5528              if (((bitField0_ & 0x00000001) == 0x00000001) &&
5529                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5530                reqInfo_ =
5531                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5532              } else {
5533                reqInfo_ = value;
5534              }
5535              onChanged();
5536            } else {
5537              reqInfoBuilder_.mergeFrom(value);
5538            }
5539            bitField0_ |= 0x00000001;
5540            return this;
5541          }
5542          /**
5543           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5544           */
5545          public Builder clearReqInfo() {
5546            if (reqInfoBuilder_ == null) {
5547              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5548              onChanged();
5549            } else {
5550              reqInfoBuilder_.clear();
5551            }
5552            bitField0_ = (bitField0_ & ~0x00000001);
5553            return this;
5554          }
5555          /**
5556           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5557           */
5558          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5559            bitField0_ |= 0x00000001;
5560            onChanged();
5561            return getReqInfoFieldBuilder().getBuilder();
5562          }
5563          /**
5564           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5565           */
5566          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5567            if (reqInfoBuilder_ != null) {
5568              return reqInfoBuilder_.getMessageOrBuilder();
5569            } else {
5570              return reqInfo_;
5571            }
5572          }
5573          /**
5574           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5575           */
5576          private com.google.protobuf.SingleFieldBuilder<
5577              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
5578              getReqInfoFieldBuilder() {
5579            if (reqInfoBuilder_ == null) {
5580              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5581                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5582                      reqInfo_,
5583                      getParentForChildren(),
5584                      isClean());
5585              reqInfo_ = null;
5586            }
5587            return reqInfoBuilder_;
5588          }
5589    
        // required uint64 txid = 2;
        // Backing storage; defaults to 0L until setTxid() is called.
        private long txid_ ;
        /**
         * <code>required uint64 txid = 2;</code>
         *
         * <pre>
         * Transaction ID
         * </pre>
         */
        public boolean hasTxid() {
          // Presence bit 0x2 of bitField0_ records whether txid was set.
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
5602          /**
5603           * <code>required uint64 txid = 2;</code>
5604           *
5605           * <pre>
5606           * Transaction ID
5607           * </pre>
5608           */
5609          public long getTxid() {
5610            return txid_;
5611          }
5612          /**
5613           * <code>required uint64 txid = 2;</code>
5614           *
5615           * <pre>
5616           * Transaction ID
5617           * </pre>
5618           */
5619          public Builder setTxid(long value) {
5620            bitField0_ |= 0x00000002;
5621            txid_ = value;
5622            onChanged();
5623            return this;
5624          }
5625          /**
5626           * <code>required uint64 txid = 2;</code>
5627           *
5628           * <pre>
5629           * Transaction ID
5630           * </pre>
5631           */
5632          public Builder clearTxid() {
5633            bitField0_ = (bitField0_ & ~0x00000002);
5634            txid_ = 0L;
5635            onChanged();
5636            return this;
5637          }
5638    
5639          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5640        }
5641    
    static {
      // Eagerly build the singleton returned by getDefaultInstance().
      defaultInstance = new StartLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
5646    
5647        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5648      }
5649    
  public interface StartLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    // Intentionally empty: StartLogSegmentResponseProto declares no fields.
  }
5653      /**
5654       * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5655       */
5656      public static final class StartLogSegmentResponseProto extends
5657          com.google.protobuf.GeneratedMessage
5658          implements StartLogSegmentResponseProtoOrBuilder {
5659        // Use StartLogSegmentResponseProto.newBuilder() to construct.
5660        private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5661          super(builder);
5662          this.unknownFields = builder.getUnknownFields();
5663        }
5664        private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5665    
5666        private static final StartLogSegmentResponseProto defaultInstance;
5667        public static StartLogSegmentResponseProto getDefaultInstance() {
5668          return defaultInstance;
5669        }
5670    
5671        public StartLogSegmentResponseProto getDefaultInstanceForType() {
5672          return defaultInstance;
5673        }
5674    
5675        private final com.google.protobuf.UnknownFieldSet unknownFields;
5676        @java.lang.Override
5677        public final com.google.protobuf.UnknownFieldSet
5678            getUnknownFields() {
5679          return this.unknownFields;
5680        }
5681        private StartLogSegmentResponseProto(
5682            com.google.protobuf.CodedInputStream input,
5683            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5684            throws com.google.protobuf.InvalidProtocolBufferException {
5685          initFields();
5686          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5687              com.google.protobuf.UnknownFieldSet.newBuilder();
5688          try {
5689            boolean done = false;
5690            while (!done) {
5691              int tag = input.readTag();
5692              switch (tag) {
5693                case 0:
5694                  done = true;
5695                  break;
5696                default: {
5697                  if (!parseUnknownField(input, unknownFields,
5698                                         extensionRegistry, tag)) {
5699                    done = true;
5700                  }
5701                  break;
5702                }
5703              }
5704            }
5705          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5706            throw e.setUnfinishedMessage(this);
5707          } catch (java.io.IOException e) {
5708            throw new com.google.protobuf.InvalidProtocolBufferException(
5709                e.getMessage()).setUnfinishedMessage(this);
5710          } finally {
5711            this.unknownFields = unknownFields.build();
5712            makeExtensionsImmutable();
5713          }
5714        }
5715        public static final com.google.protobuf.Descriptors.Descriptor
5716            getDescriptor() {
5717          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5718        }
5719    
5720        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5721            internalGetFieldAccessorTable() {
5722          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5723              .ensureFieldAccessorsInitialized(
5724                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5725        }
5726    
5727        public static com.google.protobuf.Parser<StartLogSegmentResponseProto> PARSER =
5728            new com.google.protobuf.AbstractParser<StartLogSegmentResponseProto>() {
5729          public StartLogSegmentResponseProto parsePartialFrom(
5730              com.google.protobuf.CodedInputStream input,
5731              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5732              throws com.google.protobuf.InvalidProtocolBufferException {
5733            return new StartLogSegmentResponseProto(input, extensionRegistry);
5734          }
5735        };
5736    
5737        @java.lang.Override
5738        public com.google.protobuf.Parser<StartLogSegmentResponseProto> getParserForType() {
5739          return PARSER;
5740        }
5741    
5742        private void initFields() {
5743        }
5744        private byte memoizedIsInitialized = -1;
5745        public final boolean isInitialized() {
5746          byte isInitialized = memoizedIsInitialized;
5747          if (isInitialized != -1) return isInitialized == 1;
5748    
5749          memoizedIsInitialized = 1;
5750          return true;
5751        }
5752    
5753        public void writeTo(com.google.protobuf.CodedOutputStream output)
5754                            throws java.io.IOException {
5755          getSerializedSize();
5756          getUnknownFields().writeTo(output);
5757        }
5758    
5759        private int memoizedSerializedSize = -1;
5760        public int getSerializedSize() {
5761          int size = memoizedSerializedSize;
5762          if (size != -1) return size;
5763    
5764          size = 0;
5765          size += getUnknownFields().getSerializedSize();
5766          memoizedSerializedSize = size;
5767          return size;
5768        }
5769    
5770        private static final long serialVersionUID = 0L;
5771        @java.lang.Override
5772        protected java.lang.Object writeReplace()
5773            throws java.io.ObjectStreamException {
5774          return super.writeReplace();
5775        }
5776    
5777        @java.lang.Override
5778        public boolean equals(final java.lang.Object obj) {
5779          if (obj == this) {
5780           return true;
5781          }
5782          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
5783            return super.equals(obj);
5784          }
5785          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
5786    
5787          boolean result = true;
5788          result = result &&
5789              getUnknownFields().equals(other.getUnknownFields());
5790          return result;
5791        }
5792    
5793        private int memoizedHashCode = 0;
5794        @java.lang.Override
5795        public int hashCode() {
5796          if (memoizedHashCode != 0) {
5797            return memoizedHashCode;
5798          }
5799          int hash = 41;
5800          hash = (19 * hash) + getDescriptorForType().hashCode();
5801          hash = (29 * hash) + getUnknownFields().hashCode();
5802          memoizedHashCode = hash;
5803          return hash;
5804        }
5805    
5806        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5807            com.google.protobuf.ByteString data)
5808            throws com.google.protobuf.InvalidProtocolBufferException {
5809          return PARSER.parseFrom(data);
5810        }
5811        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5812            com.google.protobuf.ByteString data,
5813            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5814            throws com.google.protobuf.InvalidProtocolBufferException {
5815          return PARSER.parseFrom(data, extensionRegistry);
5816        }
5817        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
5818            throws com.google.protobuf.InvalidProtocolBufferException {
5819          return PARSER.parseFrom(data);
5820        }
5821        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5822            byte[] data,
5823            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5824            throws com.google.protobuf.InvalidProtocolBufferException {
5825          return PARSER.parseFrom(data, extensionRegistry);
5826        }
5827        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
5828            throws java.io.IOException {
5829          return PARSER.parseFrom(input);
5830        }
5831        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5832            java.io.InputStream input,
5833            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5834            throws java.io.IOException {
5835          return PARSER.parseFrom(input, extensionRegistry);
5836        }
5837        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5838            throws java.io.IOException {
5839          return PARSER.parseDelimitedFrom(input);
5840        }
5841        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
5842            java.io.InputStream input,
5843            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5844            throws java.io.IOException {
5845          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5846        }
5847        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5848            com.google.protobuf.CodedInputStream input)
5849            throws java.io.IOException {
5850          return PARSER.parseFrom(input);
5851        }
5852        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5853            com.google.protobuf.CodedInputStream input,
5854            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5855            throws java.io.IOException {
5856          return PARSER.parseFrom(input, extensionRegistry);
5857        }
5858    
5859        public static Builder newBuilder() { return Builder.create(); }
5860        public Builder newBuilderForType() { return newBuilder(); }
5861        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
5862          return newBuilder().mergeFrom(prototype);
5863        }
5864        public Builder toBuilder() { return newBuilder(this); }
5865    
5866        @java.lang.Override
5867        protected Builder newBuilderForType(
5868            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5869          Builder builder = new Builder(parent);
5870          return builder;
5871        }
5872        /**
5873         * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5874         */
5875        public static final class Builder extends
5876            com.google.protobuf.GeneratedMessage.Builder<Builder>
5877           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
5878          public static final com.google.protobuf.Descriptors.Descriptor
5879              getDescriptor() {
5880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5881          }
5882    
5883          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5884              internalGetFieldAccessorTable() {
5885            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5886                .ensureFieldAccessorsInitialized(
5887                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5888          }
5889    
5890          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
5891          private Builder() {
5892            maybeForceBuilderInitialization();
5893          }
5894    
5895          private Builder(
5896              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5897            super(parent);
5898            maybeForceBuilderInitialization();
5899          }
5900          private void maybeForceBuilderInitialization() {
5901            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5902            }
5903          }
5904          private static Builder create() {
5905            return new Builder();
5906          }
5907    
5908          public Builder clear() {
5909            super.clear();
5910            return this;
5911          }
5912    
5913          public Builder clone() {
5914            return create().mergeFrom(buildPartial());
5915          }
5916    
5917          public com.google.protobuf.Descriptors.Descriptor
5918              getDescriptorForType() {
5919            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5920          }
5921    
5922          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
5923            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
5924          }
5925    
5926          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
5927            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
5928            if (!result.isInitialized()) {
5929              throw newUninitializedMessageException(result);
5930            }
5931            return result;
5932          }
5933    
5934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
5935            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
5936            onBuilt();
5937            return result;
5938          }
5939    
5940          public Builder mergeFrom(com.google.protobuf.Message other) {
5941            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
5942              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
5943            } else {
5944              super.mergeFrom(other);
5945              return this;
5946            }
5947          }
5948    
5949          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
5950            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
5951            this.mergeUnknownFields(other.getUnknownFields());
5952            return this;
5953          }
5954    
5955          public final boolean isInitialized() {
5956            return true;
5957          }
5958    
5959          public Builder mergeFrom(
5960              com.google.protobuf.CodedInputStream input,
5961              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5962              throws java.io.IOException {
5963            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null;
5964            try {
5965              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5966            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5967              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage();
5968              throw e;
5969            } finally {
5970              if (parsedMessage != null) {
5971                mergeFrom(parsedMessage);
5972              }
5973            }
5974            return this;
5975          }
5976    
5977          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5978        }
5979    
5980        static {
5981          defaultInstance = new StartLogSegmentResponseProto(true);
5982          defaultInstance.initFields();
5983        }
5984    
5985        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5986      }
5987    
  /**
   * Accessor interface for {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}:
   * a required reqInfo message plus required startTxId and endTxId values.
   */
  public interface FinalizeLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 startTxId = 2;
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    boolean hasStartTxId();
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    long getStartTxId();

    // required uint64 endTxId = 3;
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    boolean hasEndTxId();
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    long getEndTxId();
  }
6025      /**
6026       * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6027       *
6028       * <pre>
6029       **
6030       * finalizeLogSegment()
6031       * </pre>
6032       */
6033      public static final class FinalizeLogSegmentRequestProto extends
6034          com.google.protobuf.GeneratedMessage
6035          implements FinalizeLogSegmentRequestProtoOrBuilder {
    // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
    private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the singleton default instance.
    private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final FinalizeLogSegmentRequestProto defaultInstance;
    public static FinalizeLogSegmentRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Unknown fields captured at parse time; exposed via getUnknownFields().
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor. Tags 10 (reqInfo), 16 (startTxId) and
    // 24 (endTxId) populate the declared fields; anything else is preserved
    // as an unknown field. The mid-switch default is standard protoc output.
    private FinalizeLogSegmentRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // A repeated occurrence of reqInfo is merged into the first.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              startTxId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              endTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Whatever was read so far is attached even on failure paths.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
    }

    // Parser instance backing the static parseFrom(...) helpers.
    public static com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<FinalizeLogSegmentRequestProto>() {
      public FinalizeLogSegmentRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FinalizeLogSegmentRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> getParserForType() {
      return PARSER;
    }
6142    
    // Presence bits: 0x1 = reqInfo, 0x2 = startTxId, 0x4 = endTxId.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      // On an immutable message the field itself serves as the OrBuilder view.
      return reqInfo_;
    }

    // required uint64 startTxId = 2;
    public static final int STARTTXID_FIELD_NUMBER = 2;
    private long startTxId_;
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    public boolean hasStartTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    public long getStartTxId() {
      return startTxId_;
    }

    // required uint64 endTxId = 3;
    public static final int ENDTXID_FIELD_NUMBER = 3;
    private long endTxId_;
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    public boolean hasEndTxId() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    public long getEndTxId() {
      return endTxId_;
    }
6197    
    // Resets every field to its proto default; called by the parsing
    // constructor and by the static initializer on the default instance.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      startTxId_ = 0L;
      endTxId_ = 0L;
    }
    // Memoized initialization state: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A message is initialized only when all three required fields are present
    // and the nested reqInfo message is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
6227    
    // Serializes set fields in ascending tag order (1, 2, 3), then any
    // unknown fields carried over from parsing.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, endTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized wire size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    // Computes (and caches) the serialized byte size, mirroring writeTo's
    // field-presence checks exactly.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, endTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
6265    
    private static final long serialVersionUID = 0L;
    // Java serialization hook: delegates to GeneratedMessage's serialized
    // proxy so the message serializes via its protobuf wire form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
6272    
    // Value equality: two messages are equal when they agree on which fields
    // are set, on every set field's value, and on their unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
6303    
    // Memoized hash; 0 means not yet computed (a genuinely-zero hash is
    // recomputed each call, which is harmless).
    private int memoizedHashCode = 0;
    // Hash mixes the descriptor, each set field (number then value), and the
    // unknown fields — consistent with equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
6328    
    // Standard generated parse entry points; all delegate to the static
    // PARSER (declared earlier in this class, above this chunk).
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
6381    
    // Builder factories: fresh builder, builder pre-populated from a
    // prototype, and builder seeded from this instance (toBuilder).
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Creates a builder attached to a parent for change-notification
    // propagation (used when this message is nested in another builder).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
6395        /**
6396         * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6397         *
6398         * <pre>
6399         **
6400         * finalizeLogSegment()
6401         * </pre>
6402         */
6403        public static final class Builder extends
6404            com.google.protobuf.GeneratedMessage.Builder<Builder>
6405           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
6406          public static final com.google.protobuf.Descriptors.Descriptor
6407              getDescriptor() {
6408            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6409          }
6410    
6411          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6412              internalGetFieldAccessorTable() {
6413            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
6414                .ensureFieldAccessorsInitialized(
6415                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6416          }
6417    
6418          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
6419          private Builder() {
6420            maybeForceBuilderInitialization();
6421          }
6422    
6423          private Builder(
6424              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6425            super(parent);
6426            maybeForceBuilderInitialization();
6427          }
6428          private void maybeForceBuilderInitialization() {
6429            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6430              getReqInfoFieldBuilder();
6431            }
6432          }
6433          private static Builder create() {
6434            return new Builder();
6435          }
6436    
6437          public Builder clear() {
6438            super.clear();
6439            if (reqInfoBuilder_ == null) {
6440              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6441            } else {
6442              reqInfoBuilder_.clear();
6443            }
6444            bitField0_ = (bitField0_ & ~0x00000001);
6445            startTxId_ = 0L;
6446            bitField0_ = (bitField0_ & ~0x00000002);
6447            endTxId_ = 0L;
6448            bitField0_ = (bitField0_ & ~0x00000004);
6449            return this;
6450          }
6451    
6452          public Builder clone() {
6453            return create().mergeFrom(buildPartial());
6454          }
6455    
6456          public com.google.protobuf.Descriptors.Descriptor
6457              getDescriptorForType() {
6458            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6459          }
6460    
6461          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
6463          }
6464    
6465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
6466            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
6467            if (!result.isInitialized()) {
6468              throw newUninitializedMessageException(result);
6469            }
6470            return result;
6471          }
6472    
6473          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
6474            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
6475            int from_bitField0_ = bitField0_;
6476            int to_bitField0_ = 0;
6477            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6478              to_bitField0_ |= 0x00000001;
6479            }
6480            if (reqInfoBuilder_ == null) {
6481              result.reqInfo_ = reqInfo_;
6482            } else {
6483              result.reqInfo_ = reqInfoBuilder_.build();
6484            }
6485            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
6486              to_bitField0_ |= 0x00000002;
6487            }
6488            result.startTxId_ = startTxId_;
6489            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
6490              to_bitField0_ |= 0x00000004;
6491            }
6492            result.endTxId_ = endTxId_;
6493            result.bitField0_ = to_bitField0_;
6494            onBuilt();
6495            return result;
6496          }
6497    
6498          public Builder mergeFrom(com.google.protobuf.Message other) {
6499            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
6500              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
6501            } else {
6502              super.mergeFrom(other);
6503              return this;
6504            }
6505          }
6506    
6507          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
6508            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
6509            if (other.hasReqInfo()) {
6510              mergeReqInfo(other.getReqInfo());
6511            }
6512            if (other.hasStartTxId()) {
6513              setStartTxId(other.getStartTxId());
6514            }
6515            if (other.hasEndTxId()) {
6516              setEndTxId(other.getEndTxId());
6517            }
6518            this.mergeUnknownFields(other.getUnknownFields());
6519            return this;
6520          }
6521    
6522          public final boolean isInitialized() {
6523            if (!hasReqInfo()) {
6524              
6525              return false;
6526            }
6527            if (!hasStartTxId()) {
6528              
6529              return false;
6530            }
6531            if (!hasEndTxId()) {
6532              
6533              return false;
6534            }
6535            if (!getReqInfo().isInitialized()) {
6536              
6537              return false;
6538            }
6539            return true;
6540          }
6541    
6542          public Builder mergeFrom(
6543              com.google.protobuf.CodedInputStream input,
6544              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6545              throws java.io.IOException {
6546            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null;
6547            try {
6548              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6549            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6550              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage();
6551              throw e;
6552            } finally {
6553              if (parsedMessage != null) {
6554                mergeFrom(parsedMessage);
6555              }
6556            }
6557            return this;
6558          }
6559          private int bitField0_;
6560    
6561          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6562          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6563          private com.google.protobuf.SingleFieldBuilder<
6564              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
6565          /**
6566           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6567           */
6568          public boolean hasReqInfo() {
6569            return ((bitField0_ & 0x00000001) == 0x00000001);
6570          }
6571          /**
6572           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6573           */
6574          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6575            if (reqInfoBuilder_ == null) {
6576              return reqInfo_;
6577            } else {
6578              return reqInfoBuilder_.getMessage();
6579            }
6580          }
6581          /**
6582           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6583           */
6584          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6585            if (reqInfoBuilder_ == null) {
6586              if (value == null) {
6587                throw new NullPointerException();
6588              }
6589              reqInfo_ = value;
6590              onChanged();
6591            } else {
6592              reqInfoBuilder_.setMessage(value);
6593            }
6594            bitField0_ |= 0x00000001;
6595            return this;
6596          }
6597          /**
6598           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6599           */
6600          public Builder setReqInfo(
6601              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
6602            if (reqInfoBuilder_ == null) {
6603              reqInfo_ = builderForValue.build();
6604              onChanged();
6605            } else {
6606              reqInfoBuilder_.setMessage(builderForValue.build());
6607            }
6608            bitField0_ |= 0x00000001;
6609            return this;
6610          }
6611          /**
6612           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6613           */
6614          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6615            if (reqInfoBuilder_ == null) {
6616              if (((bitField0_ & 0x00000001) == 0x00000001) &&
6617                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
6618                reqInfo_ =
6619                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
6620              } else {
6621                reqInfo_ = value;
6622              }
6623              onChanged();
6624            } else {
6625              reqInfoBuilder_.mergeFrom(value);
6626            }
6627            bitField0_ |= 0x00000001;
6628            return this;
6629          }
6630          /**
6631           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6632           */
6633          public Builder clearReqInfo() {
6634            if (reqInfoBuilder_ == null) {
6635              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6636              onChanged();
6637            } else {
6638              reqInfoBuilder_.clear();
6639            }
6640            bitField0_ = (bitField0_ & ~0x00000001);
6641            return this;
6642          }
6643          /**
6644           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6645           */
6646          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
6647            bitField0_ |= 0x00000001;
6648            onChanged();
6649            return getReqInfoFieldBuilder().getBuilder();
6650          }
6651          /**
6652           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6653           */
6654          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6655            if (reqInfoBuilder_ != null) {
6656              return reqInfoBuilder_.getMessageOrBuilder();
6657            } else {
6658              return reqInfo_;
6659            }
6660          }
6661          /**
6662           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6663           */
6664          private com.google.protobuf.SingleFieldBuilder<
6665              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
6666              getReqInfoFieldBuilder() {
6667            if (reqInfoBuilder_ == null) {
6668              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
6669                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
6670                      reqInfo_,
6671                      getParentForChildren(),
6672                      isClean());
6673              reqInfo_ = null;
6674            }
6675            return reqInfoBuilder_;
6676          }
6677    
6678          // required uint64 startTxId = 2;
6679          private long startTxId_ ;
6680          /**
6681           * <code>required uint64 startTxId = 2;</code>
6682           */
6683          public boolean hasStartTxId() {
6684            return ((bitField0_ & 0x00000002) == 0x00000002);
6685          }
6686          /**
6687           * <code>required uint64 startTxId = 2;</code>
6688           */
6689          public long getStartTxId() {
6690            return startTxId_;
6691          }
6692          /**
6693           * <code>required uint64 startTxId = 2;</code>
6694           */
6695          public Builder setStartTxId(long value) {
6696            bitField0_ |= 0x00000002;
6697            startTxId_ = value;
6698            onChanged();
6699            return this;
6700          }
6701          /**
6702           * <code>required uint64 startTxId = 2;</code>
6703           */
6704          public Builder clearStartTxId() {
6705            bitField0_ = (bitField0_ & ~0x00000002);
6706            startTxId_ = 0L;
6707            onChanged();
6708            return this;
6709          }
6710    
6711          // required uint64 endTxId = 3;
6712          private long endTxId_ ;
6713          /**
6714           * <code>required uint64 endTxId = 3;</code>
6715           */
6716          public boolean hasEndTxId() {
6717            return ((bitField0_ & 0x00000004) == 0x00000004);
6718          }
6719          /**
6720           * <code>required uint64 endTxId = 3;</code>
6721           */
6722          public long getEndTxId() {
6723            return endTxId_;
6724          }
6725          /**
6726           * <code>required uint64 endTxId = 3;</code>
6727           */
6728          public Builder setEndTxId(long value) {
6729            bitField0_ |= 0x00000004;
6730            endTxId_ = value;
6731            onChanged();
6732            return this;
6733          }
6734          /**
6735           * <code>required uint64 endTxId = 3;</code>
6736           */
6737          public Builder clearEndTxId() {
6738            bitField0_ = (bitField0_ & ~0x00000004);
6739            endTxId_ = 0L;
6740            onChanged();
6741            return this;
6742          }
6743    
6744          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6745        }
6746    
    // Class initializer: builds the shared default instance via the no-init
    // constructor, then resets its fields to proto defaults.
    static {
      defaultInstance = new FinalizeLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
6751    
6752        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6753      }
6754    
  // Accessor interface for FinalizeLogSegmentResponseProto; empty because the
  // message declares no fields.
  public interface FinalizeLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
6758      /**
6759       * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6760       */
6761      public static final class FinalizeLogSegmentResponseProto extends
6762          com.google.protobuf.GeneratedMessage
6763          implements FinalizeLogSegmentResponseProtoOrBuilder {
    // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
    private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only by the static initializer to create the
    // singleton default instance with empty unknown fields.
    private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, assigned in the class's static block.
    private static final FinalizeLogSegmentResponseProto defaultInstance;
    public static FinalizeLogSegmentResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Unknown fields preserved from parsing so reserialization is lossless.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: the message has no declared fields, so every
    // non-zero tag is routed into the unknown-field set.
    private FinalizeLogSegmentResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of input.
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture whatever was read, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
    }

    // Parser singleton; delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<FinalizeLogSegmentResponseProto>() {
      public FinalizeLogSegmentResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FinalizeLogSegmentResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> getParserForType() {
      return PARSER;
    }
6846    
6847        private void initFields() {
6848        }
6849        private byte memoizedIsInitialized = -1;
6850        public final boolean isInitialized() {
6851          byte isInitialized = memoizedIsInitialized;
6852          if (isInitialized != -1) return isInitialized == 1;
6853    
6854          memoizedIsInitialized = 1;
6855          return true;
6856        }
6857    
    // Serializes this (empty) message: only the unknown field set is written.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // populate memoizedSerializedSize before writing
      getUnknownFields().writeTo(output);
    }

    // Cached wire size in bytes; -1 until first computed.
    private int memoizedSerializedSize = -1;
    // Computes (once) and caches the serialized size; only unknown fields
    // contribute because the message declares no fields.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
6874    
    private static final long serialVersionUID = 0L;
    // Java serialization hook: defers to GeneratedMessage's writeReplace so
    // the object is serialized via its protobuf byte representation.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
6881    
    // Two FinalizeLogSegmentResponseProtos are equal iff their unknown
    // field sets are equal (the message has no declared fields).
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Cached hash code; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    // Hash derives from the descriptor and the unknown fields, mirroring
    // the structure of equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
6910    
    // Static parse entry points for every supported input source
    // (ByteString, byte[], InputStream, CodedInputStream; with and without
    // an extension registry). All delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
6963    
    // Builder factory methods: create an empty builder, a builder seeded
    // from a prototype, or a builder seeded from this instance.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builds a builder attached to a parent for nested
    // builder change notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
6977        /**
6978         * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6979         */
6980        public static final class Builder extends
6981            com.google.protobuf.GeneratedMessage.Builder<Builder>
6982           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
6983          public static final com.google.protobuf.Descriptors.Descriptor
6984              getDescriptor() {
6985            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
6986          }
6987    
6988          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6989              internalGetFieldAccessorTable() {
6990            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
6991                .ensureFieldAccessorsInitialized(
6992                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
6993          }
6994    
6995          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
6996          private Builder() {
6997            maybeForceBuilderInitialization();
6998          }
6999    
7000          private Builder(
7001              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7002            super(parent);
7003            maybeForceBuilderInitialization();
7004          }
7005          private void maybeForceBuilderInitialization() {
7006            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7007            }
7008          }
7009          private static Builder create() {
7010            return new Builder();
7011          }
7012    
7013          public Builder clear() {
7014            super.clear();
7015            return this;
7016          }
7017    
7018          public Builder clone() {
7019            return create().mergeFrom(buildPartial());
7020          }
7021    
7022          public com.google.protobuf.Descriptors.Descriptor
7023              getDescriptorForType() {
7024            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
7025          }
7026    
7027          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
7028            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
7029          }
7030    
7031          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
7032            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
7033            if (!result.isInitialized()) {
7034              throw newUninitializedMessageException(result);
7035            }
7036            return result;
7037          }
7038    
7039          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
7040            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
7041            onBuilt();
7042            return result;
7043          }
7044    
7045          public Builder mergeFrom(com.google.protobuf.Message other) {
7046            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
7047              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
7048            } else {
7049              super.mergeFrom(other);
7050              return this;
7051            }
7052          }
7053    
7054          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
7055            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
7056            this.mergeUnknownFields(other.getUnknownFields());
7057            return this;
7058          }
7059    
7060          public final boolean isInitialized() {
7061            return true;
7062          }
7063    
7064          public Builder mergeFrom(
7065              com.google.protobuf.CodedInputStream input,
7066              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7067              throws java.io.IOException {
7068            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null;
7069            try {
7070              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7071            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7072              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage();
7073              throw e;
7074            } finally {
7075              if (parsedMessage != null) {
7076                mergeFrom(parsedMessage);
7077              }
7078            }
7079            return this;
7080          }
7081    
7082          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
7083        }
7084    
    // Eagerly create the shared default (empty) instance; the boolean-arg
    // constructor avoids triggering the stream-parsing path.
    static {
      defaultInstance = new FinalizeLogSegmentResponseProto(true);
      defaultInstance.initFields();
    }
7089    
7090        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
7091      }
7092    
  // Read-only accessor contract implemented by both PurgeLogsRequestProto
  // and its Builder.
  public interface PurgeLogsRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 minTxIdToKeep = 2;
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    boolean hasMinTxIdToKeep();
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    long getMinTxIdToKeep();
  }
7120      /**
7121       * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7122       *
7123       * <pre>
7124       **
7125       * purgeLogs()
7126       * </pre>
7127       */
7128      public static final class PurgeLogsRequestProto extends
7129          com.google.protobuf.GeneratedMessage
7130          implements PurgeLogsRequestProtoOrBuilder {
    // Use PurgeLogsRequestProto.newBuilder() to construct.
    private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only for the shared default instance; skips stream parsing.
    private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared immutable default instance, created in the static initializer.
    private static final PurgeLogsRequestProto defaultInstance;
    public static PurgeLogsRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public PurgeLogsRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not declared in the schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor used by PARSER: reads tag/value pairs
    // until end of stream (tag 0), collecting unrecognized tags into
    // unknownFields.
    private PurgeLogsRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            // NOTE: protoc emits the default label before the field cases;
            // Java switch semantics make label order irrelevant.
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {  // field 1, wire type 2: reqInfo sub-message
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // reqInfo already seen on this stream: merge per proto rules.
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {  // field 2, wire type 0: minTxIdToKeep varint
              bitField0_ |= 0x00000002;
              minTxIdToKeep_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture whatever was parsed, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor for hadoop.hdfs.PurgeLogsRequestProto.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
    }

    // Maps the descriptor to reflective field accessors for this
    // message/builder pair.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
    }

    // Singleton parser delegating to the stream-parsing constructor.
    // NOTE(review): non-final public static — protoc 2.5 artifact; do not
    // hand-edit, regenerate from QJournalProtocol.proto instead.
    public static com.google.protobuf.Parser<PurgeLogsRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<PurgeLogsRequestProto>() {
      public PurgeLogsRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PurgeLogsRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PurgeLogsRequestProto> getParserForType() {
      return PARSER;
    }
7232    
    // Presence bitmap: bit 0 = reqInfo set, bit 1 = minTxIdToKeep set.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 minTxIdToKeep = 2;
    public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
    private long minTxIdToKeep_;
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    public boolean hasMinTxIdToKeep() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    public long getMinTxIdToKeep() {
      return minTxIdToKeep_;
    }
7271    
    // Resets fields to their proto defaults (empty reqInfo, 0 txid).
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      minTxIdToKeep_ = 0L;
    }
    // Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // Initialized only when both required fields are present and the
    // nested reqInfo message is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMinTxIdToKeep()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
7296    
    // Serializes set fields in field-number order, then unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // populate memoizedSerializedSize before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, minTxIdToKeep_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size in bytes; -1 until first computed.
    private int memoizedSerializedSize = -1;
    // Computes (once) and caches the serialized size, mirroring writeTo.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, minTxIdToKeep_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
7327    
    private static final long serialVersionUID = 0L;
    // Java serialization hook: defers to GeneratedMessage's writeReplace so
    // the object is serialized via its protobuf byte representation.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
7334    
    // Field-wise equality: presence flags, set field values, and the
    // unknown field set must all match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
      if (hasMinTxIdToKeep()) {
        result = result && (getMinTxIdToKeep()
            == other.getMinTxIdToKeep());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Cached hash code; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    // Hash mixes the descriptor, each set field (keyed by field number),
    // and the unknown fields — consistent with equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasMinTxIdToKeep()) {
        hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getMinTxIdToKeep());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
7381    
    // Static parse entry points for every supported input source
    // (ByteString, byte[], InputStream, CodedInputStream; with and without
    // an extension registry). All delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
7434    
    // Builder factory methods: create an empty builder, a builder seeded
    // from a prototype, or a builder seeded from this instance.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builds a builder attached to a parent for nested
    // builder change notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
7448        /**
7449         * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7450         *
7451         * <pre>
7452         **
7453         * purgeLogs()
7454         * </pre>
7455         */
7456        public static final class Builder extends
7457            com.google.protobuf.GeneratedMessage.Builder<Builder>
7458           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
7459          public static final com.google.protobuf.Descriptors.Descriptor
7460              getDescriptor() {
7461            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7462          }
7463    
7464          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7465              internalGetFieldAccessorTable() {
7466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7467                .ensureFieldAccessorsInitialized(
7468                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7469          }
7470    
7471          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
7472          private Builder() {
7473            maybeForceBuilderInitialization();
7474          }
7475    
7476          private Builder(
7477              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7478            super(parent);
7479            maybeForceBuilderInitialization();
7480          }
7481          private void maybeForceBuilderInitialization() {
7482            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7483              getReqInfoFieldBuilder();
7484            }
7485          }
7486          private static Builder create() {
7487            return new Builder();
7488          }
7489    
      // Resets all fields to defaults and clears both presence bits.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        minTxIdToKeep_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      // Deep copy via build-partial/merge round trip.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
7506    
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
      }

      // Builds, throwing UninitializedMessageException if either required
      // field (reqInfo, minTxIdToKeep) is unset.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
7523    
      // Builds the message without enforcing required fields; copies each field
      // and translates the builder's has-bits into the message's bitField0_.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        // Bit 0x1: reqInfo presence.
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        // Bit 0x2: minTxIdToKeep presence.
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.minTxIdToKeep_ = minTxIdToKeep_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
7544    
7545          public Builder mergeFrom(com.google.protobuf.Message other) {
7546            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
7547              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
7548            } else {
7549              super.mergeFrom(other);
7550              return this;
7551            }
7552          }
7553    
7554          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
7555            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
7556            if (other.hasReqInfo()) {
7557              mergeReqInfo(other.getReqInfo());
7558            }
7559            if (other.hasMinTxIdToKeep()) {
7560              setMinTxIdToKeep(other.getMinTxIdToKeep());
7561            }
7562            this.mergeUnknownFields(other.getUnknownFields());
7563            return this;
7564          }
7565    
7566          public final boolean isInitialized() {
7567            if (!hasReqInfo()) {
7568              
7569              return false;
7570            }
7571            if (!hasMinTxIdToKeep()) {
7572              
7573              return false;
7574            }
7575            if (!getReqInfo().isInitialized()) {
7576              
7577              return false;
7578            }
7579            return true;
7580          }
7581    
      // Parses a message from the stream and merges it into this builder.
      // On parse failure the partially-read message (if any) is still merged
      // in the finally block before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits: 0x1 = reqInfo, 0x2 = minTxIdToKeep.
      private int bitField0_;
7600    
      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // The field is stored either directly in reqInfo_ or, once a nested
      // builder has been requested, exclusively in reqInfoBuilder_.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          // If a non-default value is already present, merge into it;
          // otherwise simply adopt the incoming value.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        // Requesting the nested builder marks the field as set.
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          // Lazily switch from plain-field storage to builder storage;
          // reqInfo_ is nulled because the builder now owns the value.
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
7717    
      // required uint64 minTxIdToKeep = 2;
      // uint64 on the wire; represented as a Java long (may be negative when
      // the unsigned value exceeds Long.MAX_VALUE).
      private long minTxIdToKeep_ ;
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public boolean hasMinTxIdToKeep() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public long getMinTxIdToKeep() {
        return minTxIdToKeep_;
      }
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public Builder setMinTxIdToKeep(long value) {
        bitField0_ |= 0x00000002;
        minTxIdToKeep_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public Builder clearMinTxIdToKeep() {
        bitField0_ = (bitField0_ & ~0x00000002);
        minTxIdToKeep_ = 0L;
        onChanged();
        return this;
      }
7750    
7751          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsRequestProto)
7752        }
7753    
    // Eagerly creates the shared default instance via the no-parse constructor.
    static {
      defaultInstance = new PurgeLogsRequestProto(true);
      defaultInstance.initFields();
    }
7758    
7759        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsRequestProto)
7760      }
7761    
  // Reader interface for PurgeLogsResponseProto; empty because the message
  // declares no fields.
  public interface PurgeLogsResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
7765      /**
7766       * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7767       */
  // Empty response message for purgeLogs(): it declares no fields of its own,
  // so instances only carry unknown fields read from the wire.
  public static final class PurgeLogsResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements PurgeLogsResponseProtoOrBuilder {
    // Use PurgeLogsResponseProto.newBuilder() to construct.
    private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only by the static initializer to build the default instance.
    private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final PurgeLogsResponseProto defaultInstance;
    public static PurgeLogsResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public PurgeLogsResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: since the message has no declared
    // fields, every tag except end-of-stream (0) is preserved as an
    // unknown field.
    private PurgeLogsResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Attach whatever was read so far, even when parsing failed.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
    }

    // NOTE(review): emitted as a non-final public field by protoc 2.5;
    // left unchanged because this file is generated.
    public static com.google.protobuf.Parser<PurgeLogsResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<PurgeLogsResponseProto>() {
      public PurgeLogsResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PurgeLogsResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PurgeLogsResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    // -1 = not yet computed, 1 = initialized; an empty message is always initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    // -1 = not yet computed; serialized form is just the unknown fields.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Equality is determined solely by the unknown-field sets.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // 0 acts as the "not yet computed" sentinel for the memoized hash.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No sub-message fields, so there is nothing to force-initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Only unknown fields can be merged, as the message declares none.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      // Parses from the stream and merges; a partially-read message is still
      // merged in the finally block before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsResponseProto)
    }

    static {
      defaultInstance = new PurgeLogsResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsResponseProto)
  }
8099    
  // Reader interface for IsFormattedRequestProto: presence check, message
  // accessor, and builder-or-message accessor for the single jid field.
  public interface IsFormattedRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
8117      /**
8118       * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8119       *
8120       * <pre>
8121       **
8122       * isFormatted()
8123       * </pre>
8124       */
8125      public static final class IsFormattedRequestProto extends
8126          com.google.protobuf.GeneratedMessage
8127          implements IsFormattedRequestProtoOrBuilder {
8128        // Use IsFormattedRequestProto.newBuilder() to construct.
    // Builder-based construction path; unknown fields are copied from the builder.
    private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only by the static initializer to build the default instance.
    private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8134    
    // Shared immutable default instance, assigned in the class's static initializer.
    private static final IsFormattedRequestProto defaultInstance;
    public static IsFormattedRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public IsFormattedRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
8143    
    // Fields read from the wire that are not declared in the .proto schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor. Tag 10 (field 1, wire type 2) is the
    // jid sub-message; anything else is kept as an unknown field. Note: Java
    // switch matches cases by value, so placing 'default' before 'case 10'
    // does not change behavior.
    private IsFormattedRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // Generator artifact: unused here since the message has no repeated fields.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              // If jid was already seen, merge the new occurrence into it
              // (last-field-wins merge semantics for duplicated sub-messages).
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Attach whatever was read so far, even when parsing failed.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor for hadoop.hdfs.IsFormattedRequestProto, resolved from the
    // file-level descriptor tables defined at the bottom of this outer class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
    }

    // Reflection support: maps descriptor fields to the generated accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
    }
8209    
8210        public static com.google.protobuf.Parser<IsFormattedRequestProto> PARSER =
8211            new com.google.protobuf.AbstractParser<IsFormattedRequestProto>() {
8212          public IsFormattedRequestProto parsePartialFrom(
8213              com.google.protobuf.CodedInputStream input,
8214              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8215              throws com.google.protobuf.InvalidProtocolBufferException {
8216            return new IsFormattedRequestProto(input, extensionRegistry);
8217          }
8218        };
8219    
8220        @java.lang.Override
8221        public com.google.protobuf.Parser<IsFormattedRequestProto> getParserForType() {
8222          return PARSER;
8223        }
8224    
    // Presence bits: bit 0 tracks whether jid has been set.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    // Resets jid to its default so getJid() never returns null, even when hasJid() is false.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
8251        private byte memoizedIsInitialized = -1;
8252        public final boolean isInitialized() {
8253          byte isInitialized = memoizedIsInitialized;
8254          if (isInitialized != -1) return isInitialized == 1;
8255    
8256          if (!hasJid()) {
8257            memoizedIsInitialized = 0;
8258            return false;
8259          }
8260          if (!getJid().isInitialized()) {
8261            memoizedIsInitialized = 0;
8262            return false;
8263          }
8264          memoizedIsInitialized = 1;
8265          return true;
8266        }
8267    
8268        public void writeTo(com.google.protobuf.CodedOutputStream output)
8269                            throws java.io.IOException {
8270          getSerializedSize();
8271          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8272            output.writeMessage(1, jid_);
8273          }
8274          getUnknownFields().writeTo(output);
8275        }
8276    
8277        private int memoizedSerializedSize = -1;
8278        public int getSerializedSize() {
8279          int size = memoizedSerializedSize;
8280          if (size != -1) return size;
8281    
8282          size = 0;
8283          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8284            size += com.google.protobuf.CodedOutputStream
8285              .computeMessageSize(1, jid_);
8286          }
8287          size += getUnknownFields().getSerializedSize();
8288          memoizedSerializedSize = size;
8289          return size;
8290        }
8291    
    private static final long serialVersionUID = 0L;
    // Java serialization hook: delegates to GeneratedMessage, which
    // substitutes a serialized-form proxy for this message.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
8298    
8299        @java.lang.Override
8300        public boolean equals(final java.lang.Object obj) {
8301          if (obj == this) {
8302           return true;
8303          }
8304          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
8305            return super.equals(obj);
8306          }
8307          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
8308    
8309          boolean result = true;
8310          result = result && (hasJid() == other.hasJid());
8311          if (hasJid()) {
8312            result = result && getJid()
8313                .equals(other.getJid());
8314          }
8315          result = result &&
8316              getUnknownFields().equals(other.getUnknownFields());
8317          return result;
8318        }
8319    
8320        private int memoizedHashCode = 0;
8321        @java.lang.Override
8322        public int hashCode() {
8323          if (memoizedHashCode != 0) {
8324            return memoizedHashCode;
8325          }
8326          int hash = 41;
8327          hash = (19 * hash) + getDescriptorForType().hashCode();
8328          if (hasJid()) {
8329            hash = (37 * hash) + JID_FIELD_NUMBER;
8330            hash = (53 * hash) + getJid().hashCode();
8331          }
8332          hash = (29 * hash) + getUnknownFields().hashCode();
8333          memoizedHashCode = hash;
8334          return hash;
8335        }
8336    
    // ------------------------------------------------------------------
    // Static parse helpers. All overloads delegate to PARSER; the
    // delimited variants read a varint length prefix before the message.
    // ------------------------------------------------------------------
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
8389    
8390        public static Builder newBuilder() { return Builder.create(); }
8391        public Builder newBuilderForType() { return newBuilder(); }
8392        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
8393          return newBuilder().mergeFrom(prototype);
8394        }
8395        public Builder toBuilder() { return newBuilder(this); }
8396    
8397        @java.lang.Override
8398        protected Builder newBuilderForType(
8399            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8400          Builder builder = new Builder(parent);
8401          return builder;
8402        }
8403        /**
8404         * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8405         *
8406         * <pre>
8407         **
8408         * isFormatted()
8409         * </pre>
8410         */
8411        public static final class Builder extends
8412            com.google.protobuf.GeneratedMessage.Builder<Builder>
8413           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
8414          public static final com.google.protobuf.Descriptors.Descriptor
8415              getDescriptor() {
8416            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8417          }
8418    
8419          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8420              internalGetFieldAccessorTable() {
8421            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
8422                .ensureFieldAccessorsInitialized(
8423                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8424          }
8425    
8426          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
8427          private Builder() {
8428            maybeForceBuilderInitialization();
8429          }
8430    
8431          private Builder(
8432              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8433            super(parent);
8434            maybeForceBuilderInitialization();
8435          }
8436          private void maybeForceBuilderInitialization() {
8437            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8438              getJidFieldBuilder();
8439            }
8440          }
8441          private static Builder create() {
8442            return new Builder();
8443          }
8444    
8445          public Builder clear() {
8446            super.clear();
8447            if (jidBuilder_ == null) {
8448              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8449            } else {
8450              jidBuilder_.clear();
8451            }
8452            bitField0_ = (bitField0_ & ~0x00000001);
8453            return this;
8454          }
8455    
8456          public Builder clone() {
8457            return create().mergeFrom(buildPartial());
8458          }
8459    
8460          public com.google.protobuf.Descriptors.Descriptor
8461              getDescriptorForType() {
8462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8463          }
8464    
8465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
8466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
8467          }
8468    
8469          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
8470            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
8471            if (!result.isInitialized()) {
8472              throw newUninitializedMessageException(result);
8473            }
8474            return result;
8475          }
8476    
8477          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
8478            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
8479            int from_bitField0_ = bitField0_;
8480            int to_bitField0_ = 0;
8481            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8482              to_bitField0_ |= 0x00000001;
8483            }
8484            if (jidBuilder_ == null) {
8485              result.jid_ = jid_;
8486            } else {
8487              result.jid_ = jidBuilder_.build();
8488            }
8489            result.bitField0_ = to_bitField0_;
8490            onBuilt();
8491            return result;
8492          }
8493    
8494          public Builder mergeFrom(com.google.protobuf.Message other) {
8495            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
8496              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
8497            } else {
8498              super.mergeFrom(other);
8499              return this;
8500            }
8501          }
8502    
8503          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
8504            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
8505            if (other.hasJid()) {
8506              mergeJid(other.getJid());
8507            }
8508            this.mergeUnknownFields(other.getUnknownFields());
8509            return this;
8510          }
8511    
8512          public final boolean isInitialized() {
8513            if (!hasJid()) {
8514              
8515              return false;
8516            }
8517            if (!getJid().isInitialized()) {
8518              
8519              return false;
8520            }
8521            return true;
8522          }
8523    
8524          public Builder mergeFrom(
8525              com.google.protobuf.CodedInputStream input,
8526              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8527              throws java.io.IOException {
8528            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null;
8529            try {
8530              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8531            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8532              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage();
8533              throw e;
8534            } finally {
8535              if (parsedMessage != null) {
8536                mergeFrom(parsedMessage);
8537              }
8538            }
8539            return this;
8540          }
8541          private int bitField0_;
8542    
8543          // required .hadoop.hdfs.JournalIdProto jid = 1;
8544          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8545          private com.google.protobuf.SingleFieldBuilder<
8546              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8547          /**
8548           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8549           */
8550          public boolean hasJid() {
8551            return ((bitField0_ & 0x00000001) == 0x00000001);
8552          }
8553          /**
8554           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8555           */
8556          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8557            if (jidBuilder_ == null) {
8558              return jid_;
8559            } else {
8560              return jidBuilder_.getMessage();
8561            }
8562          }
8563          /**
8564           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8565           */
8566          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8567            if (jidBuilder_ == null) {
8568              if (value == null) {
8569                throw new NullPointerException();
8570              }
8571              jid_ = value;
8572              onChanged();
8573            } else {
8574              jidBuilder_.setMessage(value);
8575            }
8576            bitField0_ |= 0x00000001;
8577            return this;
8578          }
8579          /**
8580           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8581           */
8582          public Builder setJid(
8583              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8584            if (jidBuilder_ == null) {
8585              jid_ = builderForValue.build();
8586              onChanged();
8587            } else {
8588              jidBuilder_.setMessage(builderForValue.build());
8589            }
8590            bitField0_ |= 0x00000001;
8591            return this;
8592          }
8593          /**
8594           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8595           */
8596          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8597            if (jidBuilder_ == null) {
8598              if (((bitField0_ & 0x00000001) == 0x00000001) &&
8599                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8600                jid_ =
8601                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8602              } else {
8603                jid_ = value;
8604              }
8605              onChanged();
8606            } else {
8607              jidBuilder_.mergeFrom(value);
8608            }
8609            bitField0_ |= 0x00000001;
8610            return this;
8611          }
8612          /**
8613           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8614           */
8615          public Builder clearJid() {
8616            if (jidBuilder_ == null) {
8617              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8618              onChanged();
8619            } else {
8620              jidBuilder_.clear();
8621            }
8622            bitField0_ = (bitField0_ & ~0x00000001);
8623            return this;
8624          }
8625          /**
8626           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8627           */
8628          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8629            bitField0_ |= 0x00000001;
8630            onChanged();
8631            return getJidFieldBuilder().getBuilder();
8632          }
8633          /**
8634           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8635           */
8636          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8637            if (jidBuilder_ != null) {
8638              return jidBuilder_.getMessageOrBuilder();
8639            } else {
8640              return jid_;
8641            }
8642          }
8643          /**
8644           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8645           */
8646          private com.google.protobuf.SingleFieldBuilder<
8647              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
8648              getJidFieldBuilder() {
8649            if (jidBuilder_ == null) {
8650              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8651                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8652                      jid_,
8653                      getParentForChildren(),
8654                      isClean());
8655              jid_ = null;
8656            }
8657            return jidBuilder_;
8658          }
8659    
8660          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedRequestProto)
8661        }
8662    
    static {
      // Order matters: construct the singleton first, then populate its
      // fields with their defaults.
      defaultInstance = new IsFormattedRequestProto(true);
      defaultInstance.initFields();
    }
8667    
8668        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedRequestProto)
8669      }
8670    
  // Read-only view shared by IsFormattedResponseProto and its Builder.
  public interface IsFormattedResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required bool isFormatted = 1;
    /**
     * <code>required bool isFormatted = 1;</code>
     */
    boolean hasIsFormatted();
    /**
     * <code>required bool isFormatted = 1;</code>
     */
    boolean getIsFormatted();
  }
8684      /**
8685       * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8686       */
8687      public static final class IsFormattedResponseProto extends
8688          com.google.protobuf.GeneratedMessage
8689          implements IsFormattedResponseProtoOrBuilder {
8690        // Use IsFormattedResponseProto.newBuilder() to construct.
    // Builder-path constructor: adopts the unknown fields accumulated by the builder.
    private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only to create the singleton default instance (see static initializer).
    private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Immutable singleton with all fields at their defaults.
    private static final IsFormattedResponseProto defaultInstance;
    public static IsFormattedResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public IsFormattedResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that were present on the wire but not in this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
8712        private IsFormattedResponseProto(
8713            com.google.protobuf.CodedInputStream input,
8714            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8715            throws com.google.protobuf.InvalidProtocolBufferException {
8716          initFields();
8717          int mutable_bitField0_ = 0;
8718          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8719              com.google.protobuf.UnknownFieldSet.newBuilder();
8720          try {
8721            boolean done = false;
8722            while (!done) {
8723              int tag = input.readTag();
8724              switch (tag) {
8725                case 0:
8726                  done = true;
8727                  break;
8728                default: {
8729                  if (!parseUnknownField(input, unknownFields,
8730                                         extensionRegistry, tag)) {
8731                    done = true;
8732                  }
8733                  break;
8734                }
8735                case 8: {
8736                  bitField0_ |= 0x00000001;
8737                  isFormatted_ = input.readBool();
8738                  break;
8739                }
8740              }
8741            }
8742          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8743            throw e.setUnfinishedMessage(this);
8744          } catch (java.io.IOException e) {
8745            throw new com.google.protobuf.InvalidProtocolBufferException(
8746                e.getMessage()).setUnfinishedMessage(this);
8747          } finally {
8748            this.unknownFields = unknownFields.build();
8749            makeExtensionsImmutable();
8750          }
8751        }
    // Descriptor for hadoop.hdfs.IsFormattedResponseProto, resolved from the
    // file-level descriptor tables defined at the bottom of this outer class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
    }

    // Reflection support: maps descriptor fields to the generated accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
    }
8763    
8764        public static com.google.protobuf.Parser<IsFormattedResponseProto> PARSER =
8765            new com.google.protobuf.AbstractParser<IsFormattedResponseProto>() {
8766          public IsFormattedResponseProto parsePartialFrom(
8767              com.google.protobuf.CodedInputStream input,
8768              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8769              throws com.google.protobuf.InvalidProtocolBufferException {
8770            return new IsFormattedResponseProto(input, extensionRegistry);
8771          }
8772        };
8773    
8774        @java.lang.Override
8775        public com.google.protobuf.Parser<IsFormattedResponseProto> getParserForType() {
8776          return PARSER;
8777        }
8778    
    // Presence bits: bit 0 tracks whether isFormatted has been set.
    private int bitField0_;
    // required bool isFormatted = 1;
    public static final int ISFORMATTED_FIELD_NUMBER = 1;
    private boolean isFormatted_;
    /**
     * <code>required bool isFormatted = 1;</code>
     */
    public boolean hasIsFormatted() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required bool isFormatted = 1;</code>
     */
    public boolean getIsFormatted() {
      return isFormatted_;
    }

    // Resets isFormatted to the proto default (false).
    private void initFields() {
      isFormatted_ = false;
    }
8798        }
8799        private byte memoizedIsInitialized = -1;
8800        public final boolean isInitialized() {
8801          byte isInitialized = memoizedIsInitialized;
8802          if (isInitialized != -1) return isInitialized == 1;
8803    
8804          if (!hasIsFormatted()) {
8805            memoizedIsInitialized = 0;
8806            return false;
8807          }
8808          memoizedIsInitialized = 1;
8809          return true;
8810        }
8811    
8812        public void writeTo(com.google.protobuf.CodedOutputStream output)
8813                            throws java.io.IOException {
8814          getSerializedSize();
8815          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8816            output.writeBool(1, isFormatted_);
8817          }
8818          getUnknownFields().writeTo(output);
8819        }
8820    
8821        private int memoizedSerializedSize = -1;
8822        public int getSerializedSize() {
8823          int size = memoizedSerializedSize;
8824          if (size != -1) return size;
8825    
8826          size = 0;
8827          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8828            size += com.google.protobuf.CodedOutputStream
8829              .computeBoolSize(1, isFormatted_);
8830          }
8831          size += getUnknownFields().getSerializedSize();
8832          memoizedSerializedSize = size;
8833          return size;
8834        }
8835    
    private static final long serialVersionUID = 0L;
    // Java serialization hook: delegates to GeneratedMessage, which
    // substitutes a serialized-form proxy for this message.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
8842    
    // Value equality: same presence and value of isFormatted, and equal unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;

      boolean result = true;
      result = result && (hasIsFormatted() == other.hasIsFormatted());
      if (hasIsFormatted()) {
        result = result && (getIsFormatted()
            == other.getIsFormatted());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
8863    
    // Memoized hash; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      // Mixes in the descriptor hash, the set field, and unknown fields,
      // consistent with the generated equals() above.
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIsFormatted()) {
        hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getIsFormatted());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
8880    
    // Static parse entry points; all delegate to the message's PARSER instance.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message payload.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
8933    
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a builder pre-populated from the given prototype message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Runtime hook: creates a builder attached to the given BuilderParent.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
8947        /**
8948         * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8949         */
8950        public static final class Builder extends
8951            com.google.protobuf.GeneratedMessage.Builder<Builder>
8952           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
8953          public static final com.google.protobuf.Descriptors.Descriptor
8954              getDescriptor() {
8955            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8956          }
8957    
8958          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8959              internalGetFieldAccessorTable() {
8960            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8961                .ensureFieldAccessorsInitialized(
8962                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8963          }
8964    
8965          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
8966          private Builder() {
8967            maybeForceBuilderInitialization();
8968          }
8969    
8970          private Builder(
8971              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8972            super(parent);
8973            maybeForceBuilderInitialization();
8974          }
8975          private void maybeForceBuilderInitialization() {
8976            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8977            }
8978          }
8979          private static Builder create() {
8980            return new Builder();
8981          }
8982    
8983          public Builder clear() {
8984            super.clear();
8985            isFormatted_ = false;
8986            bitField0_ = (bitField0_ & ~0x00000001);
8987            return this;
8988          }
8989    
8990          public Builder clone() {
8991            return create().mergeFrom(buildPartial());
8992          }
8993    
8994          public com.google.protobuf.Descriptors.Descriptor
8995              getDescriptorForType() {
8996            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8997          }
8998    
8999          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
9000            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
9001          }
9002    
9003          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
9004            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
9005            if (!result.isInitialized()) {
9006              throw newUninitializedMessageException(result);
9007            }
9008            return result;
9009          }
9010    
9011          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
9012            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
9013            int from_bitField0_ = bitField0_;
9014            int to_bitField0_ = 0;
9015            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9016              to_bitField0_ |= 0x00000001;
9017            }
9018            result.isFormatted_ = isFormatted_;
9019            result.bitField0_ = to_bitField0_;
9020            onBuilt();
9021            return result;
9022          }
9023    
9024          public Builder mergeFrom(com.google.protobuf.Message other) {
9025            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
9026              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
9027            } else {
9028              super.mergeFrom(other);
9029              return this;
9030            }
9031          }
9032    
9033          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
9034            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
9035            if (other.hasIsFormatted()) {
9036              setIsFormatted(other.getIsFormatted());
9037            }
9038            this.mergeUnknownFields(other.getUnknownFields());
9039            return this;
9040          }
9041    
9042          public final boolean isInitialized() {
9043            if (!hasIsFormatted()) {
9044              
9045              return false;
9046            }
9047            return true;
9048          }
9049    
9050          public Builder mergeFrom(
9051              com.google.protobuf.CodedInputStream input,
9052              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9053              throws java.io.IOException {
9054            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null;
9055            try {
9056              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9057            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9058              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage();
9059              throw e;
9060            } finally {
9061              if (parsedMessage != null) {
9062                mergeFrom(parsedMessage);
9063              }
9064            }
9065            return this;
9066          }
9067          private int bitField0_;
9068    
9069          // required bool isFormatted = 1;
9070          private boolean isFormatted_ ;
9071          /**
9072           * <code>required bool isFormatted = 1;</code>
9073           */
9074          public boolean hasIsFormatted() {
9075            return ((bitField0_ & 0x00000001) == 0x00000001);
9076          }
9077          /**
9078           * <code>required bool isFormatted = 1;</code>
9079           */
9080          public boolean getIsFormatted() {
9081            return isFormatted_;
9082          }
9083          /**
9084           * <code>required bool isFormatted = 1;</code>
9085           */
9086          public Builder setIsFormatted(boolean value) {
9087            bitField0_ |= 0x00000001;
9088            isFormatted_ = value;
9089            onChanged();
9090            return this;
9091          }
9092          /**
9093           * <code>required bool isFormatted = 1;</code>
9094           */
9095          public Builder clearIsFormatted() {
9096            bitField0_ = (bitField0_ & ~0x00000001);
9097            isFormatted_ = false;
9098            onChanged();
9099            return this;
9100          }
9101    
9102          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedResponseProto)
9103        }
9104    
    // Eagerly creates the shared default instance returned by getDefaultInstance().
    static {
      defaultInstance = new IsFormattedResponseProto(true);
      defaultInstance.initFields();
    }
9109    
9110        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedResponseProto)
9111      }
9112    
  /**
   * Read accessors for {@code hadoop.hdfs.GetJournalStateRequestProto};
   * implemented by both the message class and its Builder.
   */
  public interface GetJournalStateRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
9130      /**
9131       * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9132       *
9133       * <pre>
9134       **
9135       * getJournalState()
9136       * </pre>
9137       */
9138      public static final class GetJournalStateRequestProto extends
9139          com.google.protobuf.GeneratedMessage
9140          implements GetJournalStateRequestProtoOrBuilder {
    // Use GetJournalStateRequestProto.newBuilder() to construct.
    private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit variant: leaves fields untouched; presumably used only to create the shared defaultInstance.
    private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9147    
    // Shared immutable instance with all fields at their proto defaults.
    private static final GetJournalStateRequestProto defaultInstance;
    public static GetJournalStateRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public GetJournalStateRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
9156    
    // Fields seen on the wire that this generated class does not recognize.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parses a message directly from the wire. The generated switch lists
    // `default` before `case 10`; Java switch semantics make the order irrelevant.
    private GetJournalStateRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of input.
              done = true;
              break;
            default: {
              // Unrecognized field: preserve it in unknownFields (or stop on end-group).
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (jid), wire type 2. If jid was already parsed, merge the
              // new occurrence into the old one, per proto duplicate-field semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always record whatever unknown fields were read, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support, wired to the file-level descriptor tables.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
    }
9222    
    // Parser used by every parseFrom() overload; delegates to the wire-parsing constructor.
    public static com.google.protobuf.Parser<GetJournalStateRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<GetJournalStateRequestProto>() {
      public GetJournalStateRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new GetJournalStateRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<GetJournalStateRequestProto> getParserForType() {
      return PARSER;
    }
9237    
    // Presence bits for optional/required fields; bit 0 = jid.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     *
     * @return the jid message; the JournalIdProto default instance when unset
     *         (established by initFields below)
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
9260    
    // Resets jid to the shared JournalIdProto default instance.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    // Memoized result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Required jid must be present...
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      // ...and itself fully initialized (it has its own required fields).
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
9280    
    // Serializes the set fields (field 1, then any unknown fields) to the stream.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // called for its memoization side effect only
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }
9289    
    // Memoized wire size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
9304    
    private static final long serialVersionUID = 0L;
    // Java serialization hook; defers to the GeneratedMessage implementation.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
9311    
    // Value equality: same presence and value of jid, and equal unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
9332    
    // Memoized hash; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      // Mixes in the descriptor hash, the set field, and unknown fields,
      // consistent with the generated equals() above.
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
9349    
    // Static parse entry points; all delegate to the PARSER instance above.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message payload.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
9402    
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a builder pre-populated from the given prototype message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Runtime hook: creates a builder attached to the given BuilderParent.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
9416        /**
9417         * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9418         *
9419         * <pre>
9420         **
9421         * getJournalState()
9422         * </pre>
9423         */
9424        public static final class Builder extends
9425            com.google.protobuf.GeneratedMessage.Builder<Builder>
9426           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
      // Reflection support, wired to the file-level descriptor tables.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
      }
9438    
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          // Eagerly creates the nested-message field builder for jid.
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
9457    
      // Resets jid to its default (directly, or via its field builder) and clears its has-bit.
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
9472    
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
      }

      // Like buildPartial(), but rejects messages missing required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
9489    
9490          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
9491            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
9492            int from_bitField0_ = bitField0_;
9493            int to_bitField0_ = 0;
9494            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9495              to_bitField0_ |= 0x00000001;
9496            }
9497            if (jidBuilder_ == null) {
9498              result.jid_ = jid_;
9499            } else {
9500              result.jid_ = jidBuilder_.build();
9501            }
9502            result.bitField0_ = to_bitField0_;
9503            onBuilt();
9504            return result;
9505          }
9506    
      /** Type-dispatching merge: routes to the typed overload when possible. */
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Merges set fields from {@code other} into this builder; merging the
       * default instance is a no-op. Unknown fields are merged as well.
       */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      /**
       * True only when required {@code jid} is set and is itself initialized
       * (JournalIdProto has its own required field).
       */
      public final boolean isInitialized() {
        if (!hasJid()) {
          
          return false;
        }
        if (!getJid().isInitialized()) {
          
          return false;
        }
        return true;
      }
9536    
      /**
       * Parses from a stream via PARSER and merges the result into this
       * builder. On InvalidProtocolBufferException the partially-parsed
       * message (if any) is still merged before the exception is rethrown.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits for optional/required fields (bit 0 = jid).
      private int bitField0_;
9555    
      // required .hadoop.hdfs.JournalIdProto jid = 1;
      // Cached message value, used until a nested builder is created; once
      // jidBuilder_ is non-null it becomes the single source of truth.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       * Field-level merge: if jid is already set to a non-default value the
       * two messages are merged, otherwise {@code value} replaces it.
       */
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       * Marks the field as set and hands out the mutable nested builder.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       * Lazily creates the nested builder, seeding it with jid_ and then
       * nulling jid_ so there is only one live representation of the field.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
9672    
9673          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateRequestProto)
9674        }
9675    
    // Eagerly create and initialize the singleton default instance.
    static {
      defaultInstance = new GetJournalStateRequestProto(true);
      defaultInstance.initFields();
    }
9680    
9681        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateRequestProto)
9682      }
9683    
  /**
   * Read-accessor contract implemented by both
   * {@code GetJournalStateResponseProto} and its {@code Builder}.
   */
  public interface GetJournalStateResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 lastPromisedEpoch = 1;
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    boolean hasLastPromisedEpoch();
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    long getLastPromisedEpoch();

    // required uint32 httpPort = 2;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    boolean hasHttpPort();
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    int getHttpPort();
  }
9707      /**
9708       * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
9709       */
9710      public static final class GetJournalStateResponseProto extends
9711          com.google.protobuf.GeneratedMessage
9712          implements GetJournalStateResponseProtoOrBuilder {
    // Use GetJournalStateResponseProto.newBuilder() to construct.
    private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only for the singleton default instance.
    private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, created in the class's static initializer.
    private static final GetJournalStateResponseProto defaultInstance;
    public static GetJournalStateResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public GetJournalStateResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that were on the wire but are not in this message definition.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tags until EOF (tag 0),
     * setting presence bits as fields arrive; unrecognized tags go to
     * unknownFields. Partial state is preserved on parse failure via
     * setUnfinishedMessage.
     */
    private GetJournalStateResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              // field 1, wire type 0 (varint): lastPromisedEpoch
              bitField0_ |= 0x00000001;
              lastPromisedEpoch_ = input.readUInt64();
              break;
            }
            case 16: {
              // field 2, wire type 0 (varint): httpPort
              bitField0_ |= 0x00000002;
              httpPort_ = input.readUInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Descriptor for {@code hadoop.hdfs.GetJournalStateResponseProto}. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
    }

    // Reflection support: maps descriptor fields to generated accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
    }

    // Parser delegating to the wire-format parsing constructor above.
    public static com.google.protobuf.Parser<GetJournalStateResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<GetJournalStateResponseProto>() {
      public GetJournalStateResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new GetJournalStateResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<GetJournalStateResponseProto> getParserForType() {
      return PARSER;
    }
9806    
    // Presence bits: bit 0 = lastPromisedEpoch, bit 1 = httpPort.
    private int bitField0_;
    // required uint64 lastPromisedEpoch = 1;
    public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
    private long lastPromisedEpoch_;
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    public boolean hasLastPromisedEpoch() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    public long getLastPromisedEpoch() {
      return lastPromisedEpoch_;
    }

    // required uint32 httpPort = 2;
    public static final int HTTPPORT_FIELD_NUMBER = 2;
    private int httpPort_;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    public boolean hasHttpPort() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    public int getHttpPort() {
      return httpPort_;
    }

    // Sets every field to its proto default value.
    private void initFields() {
      lastPromisedEpoch_ = 0L;
      httpPort_ = 0;
    }
    // Memoized initialization check: -1 = unknown, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    /** True only when both required fields are present. */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasLastPromisedEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasHttpPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
9860    
    /**
     * Serializes the set fields (in field-number order) followed by any
     * unknown fields. getSerializedSize() is called first to populate the
     * memoized size used by the output stream.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, lastPromisedEpoch_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, httpPort_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 means not yet computed (message is immutable).
    private int memoizedSerializedSize = -1;
    /** Computes (once) and returns the serialized byte size of this message. */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, lastPromisedEpoch_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, httpPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
9898    
    /**
     * Field-wise equality: presence flags, set values, and unknown fields
     * must all match.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;

      boolean result = true;
      result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
      if (hasLastPromisedEpoch()) {
        result = result && (getLastPromisedEpoch()
            == other.getLastPromisedEpoch());
      }
      result = result && (hasHttpPort() == other.hasHttpPort());
      if (hasHttpPort()) {
        result = result && (getHttpPort()
            == other.getHttpPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Cached hash; 0 doubles as the "not computed" sentinel.
    private int memoizedHashCode = 0;
    /** Hash over descriptor, set fields (number + value), and unknown fields. */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasLastPromisedEpoch()) {
        hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastPromisedEpoch());
      }
      if (hasHttpPort()) {
        hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
        hash = (53 * hash) + getHttpPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
9945    
    // Static parsing entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length prefix.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
9998    
    /** Returns a fresh, empty builder for this message type. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Returns a builder pre-populated from {@code prototype}. */
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Runtime hook: builder wired to a parent for nested-builder updates.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
10012        /**
10013         * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
10014         */
10015        public static final class Builder extends
10016            com.google.protobuf.GeneratedMessage.Builder<Builder>
10017           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
      /** Descriptor for {@code hadoop.hdfs.GetJournalStateResponseProto}. */
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
      }

      // Reflection support: maps descriptor fields to generated accessors.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
      }
10029    
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      /** Builder attached to a parent for nested-builder change propagation. */
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message-typed fields here, so nothing to pre-initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      // Private factory; the message's static newBuilder() delegates here.
      private static Builder create() {
        return new Builder();
      }
10047    
      /** Resets both fields to their proto defaults and clears presence bits. */
      public Builder clear() {
        super.clear();
        lastPromisedEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        httpPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      /** Returns an independent copy of this builder's current state. */
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
      }

      /** Returns the shared immutable default instance of the message type. */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
      }
10069    
      /**
       * Builds the message, throwing an UninitializedMessageException if
       * either required field is unset.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds without validating required fields; copies values and
       * translates builder presence bits into the message's bitField0_.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.lastPromisedEpoch_ = lastPromisedEpoch_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.httpPort_ = httpPort_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
10094    
      /** Type-dispatching merge: routes to the typed overload when possible. */
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Merges set fields from {@code other}; merging the default instance
       * is a no-op. Unknown fields are merged as well.
       */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
        if (other.hasLastPromisedEpoch()) {
          setLastPromisedEpoch(other.getLastPromisedEpoch());
        }
        if (other.hasHttpPort()) {
          setHttpPort(other.getHttpPort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      /** True only when both required fields are set. */
      public final boolean isInitialized() {
        if (!hasLastPromisedEpoch()) {
          
          return false;
        }
        if (!hasHttpPort()) {
          
          return false;
        }
        return true;
      }
10127    
10128          public Builder mergeFrom(
10129              com.google.protobuf.CodedInputStream input,
10130              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10131              throws java.io.IOException {
10132            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null;
10133            try {
10134              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10135            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10136              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage();
10137              throw e;
10138            } finally {
10139              if (parsedMessage != null) {
10140                mergeFrom(parsedMessage);
10141              }
10142            }
10143            return this;
10144          }
      // Presence bitmask: bit 0 = lastPromisedEpoch, bit 1 = httpPort.
      private int bitField0_;

      // required uint64 lastPromisedEpoch = 1;
      private long lastPromisedEpoch_ ;
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public boolean hasLastPromisedEpoch() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public long getLastPromisedEpoch() {
        return lastPromisedEpoch_;
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public Builder setLastPromisedEpoch(long value) {
        bitField0_ |= 0x00000001;
        lastPromisedEpoch_ = value;
        // Notify any parent builder that this builder changed.
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public Builder clearLastPromisedEpoch() {
        // Drop the presence bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000001);
        lastPromisedEpoch_ = 0L;
        onChanged();
        return this;
      }
10179    
      // required uint32 httpPort = 2;
      private int httpPort_ ;
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public boolean hasHttpPort() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public int getHttpPort() {
        return httpPort_;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder setHttpPort(int value) {
        bitField0_ |= 0x00000002;
        httpPort_ = value;
        // Notify any parent builder that this builder changed.
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder clearHttpPort() {
        // Drop the presence bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000002);
        httpPort_ = 0;
        onChanged();
        return this;
      }
10212    
10213          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateResponseProto)
10214        }
10215    
    // Create the shared default instance once, at class-load time.
    static {
      defaultInstance = new GetJournalStateResponseProto(true);
      defaultInstance.initFields();
    }
10220    
10221        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateResponseProto)
10222      }
10223    
  /**
   * Read-only view shared by {@code FormatRequestProto} and its Builder;
   * lets callers accept either a built message or an in-progress builder.
   */
  public interface FormatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    boolean hasNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
  }
10255      /**
10256       * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10257       *
10258       * <pre>
10259       **
10260       * format()
10261       * </pre>
10262       */
10263      public static final class FormatRequestProto extends
10264          com.google.protobuf.GeneratedMessage
10265          implements FormatRequestProtoOrBuilder {
    // Use FormatRequestProto.newBuilder() to construct.
    private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the singleton default instance (noInit is a marker).
    private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, assigned in the class static initializer.
    private static final FormatRequestProto defaultInstance;
    public static FormatRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public FormatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Wire fields that arrived but are not defined in the .proto schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs from
     * {@code input} until tag 0 (end of message), filling fields and
     * collecting unrecognized tags into {@code unknownFields}.
     * Invoked only via {@link #PARSER}.
     */
    private FormatRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of message / end of stream.
              done = true;
              break;
            default: {
              // Unrecognized field: keep it so re-serialization round-trips.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            // Tag 10 = field 1 (jid), wire type 2 (length-delimited).
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // Field seen before: merge the new value into the old one.
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            // Tag 18 = field 2 (nsInfo), wire type 2 (length-delimited).
            case 18: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = nsInfo_.toBuilder();
              }
              nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(nsInfo_);
                nsInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was decoded, even on error paths.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Returns the protobuf descriptor for this message type (reflection support). */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
    }

    // Maps descriptor fields onto this class's fields for reflective access.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
    }

    // Shared stateless parser; the static parseFrom helpers all delegate here.
    public static com.google.protobuf.Parser<FormatRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<FormatRequestProto>() {
      public FormatRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FormatRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FormatRequestProto> getParserForType() {
      return PARSER;
    }
10375    
    // Presence bitmask: bit 0 = jid, bit 1 = nsInfo.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }
10420    
    // Resets every field to its proto default; called before wire parsing.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
    }
10425        private byte memoizedIsInitialized = -1;
10426        public final boolean isInitialized() {
10427          byte isInitialized = memoizedIsInitialized;
10428          if (isInitialized != -1) return isInitialized == 1;
10429    
10430          if (!hasJid()) {
10431            memoizedIsInitialized = 0;
10432            return false;
10433          }
10434          if (!hasNsInfo()) {
10435            memoizedIsInitialized = 0;
10436            return false;
10437          }
10438          if (!getJid().isInitialized()) {
10439            memoizedIsInitialized = 0;
10440            return false;
10441          }
10442          if (!getNsInfo().isInitialized()) {
10443            memoizedIsInitialized = 0;
10444            return false;
10445          }
10446          memoizedIsInitialized = 1;
10447          return true;
10448        }
10449    
10450        public void writeTo(com.google.protobuf.CodedOutputStream output)
10451                            throws java.io.IOException {
10452          getSerializedSize();
10453          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10454            output.writeMessage(1, jid_);
10455          }
10456          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10457            output.writeMessage(2, nsInfo_);
10458          }
10459          getUnknownFields().writeTo(output);
10460        }
10461    
10462        private int memoizedSerializedSize = -1;
10463        public int getSerializedSize() {
10464          int size = memoizedSerializedSize;
10465          if (size != -1) return size;
10466    
10467          size = 0;
10468          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10469            size += com.google.protobuf.CodedOutputStream
10470              .computeMessageSize(1, jid_);
10471          }
10472          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10473            size += com.google.protobuf.CodedOutputStream
10474              .computeMessageSize(2, nsInfo_);
10475          }
10476          size += getUnknownFields().getSerializedSize();
10477          memoizedSerializedSize = size;
10478          return size;
10479        }
10480    
    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to GeneratedMessage.writeReplace,
    // which serializes the proto wire bytes instead of the object graph.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
10487    
    /**
     * Field-by-field equality: presence flags, set field values, and
     * unknown fields must all match.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
10513    
    // Cached hash; 0 means "not yet computed" (message is immutable).
    private int memoizedHashCode = 0;
    /**
     * Hash over the descriptor, each set field (tagged by field number),
     * and the unknown fields — consistent with {@link #equals}.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
10534    
    // ----- Static parse entry points; all delegate to PARSER. -----
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
10587    
    // ----- Builder factory methods. -----
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a builder pre-populated with the fields of {@code prototype}.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Called by the runtime to create a child builder wired to a parent.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
10601        /**
10602         * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10603         *
10604         * <pre>
10605         **
10606         * format()
10607         * </pre>
10608         */
10609        public static final class Builder extends
10610            com.google.protobuf.GeneratedMessage.Builder<Builder>
10611           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
      /** Returns the descriptor for the message type this builder constructs. */
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
      }

      // Maps descriptor fields onto message/builder fields for reflection.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested-field builders when the runtime requires it
      // (i.e. when building via reflection with alwaysUseFieldBuilders set).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
10643    
      /**
       * Resets both fields to their proto defaults and clears their
       * presence bits, routing through the sub-builder when one exists.
       */
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
10660    
      // Deep copy: a fresh builder merged from this builder's current state.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
      }
10673    
      /**
       * Builds the message, throwing if any required field is missing.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds the message without checking required fields: copies each
       * set field (from the sub-builder if present, else the cached value)
       * and translates the builder's presence bits into the message's.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (nsInfoBuilder_ == null) {
          result.nsInfo_ = nsInfo_;
        } else {
          result.nsInfo_ = nsInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
10706    
10707          public Builder mergeFrom(com.google.protobuf.Message other) {
10708            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
10709              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
10710            } else {
10711              super.mergeFrom(other);
10712              return this;
10713            }
10714          }
10715    
      /**
       * Merges each field set in {@code other} into this builder; nested
       * messages are recursively merged (not replaced) via mergeJid/mergeNsInfo.
       */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
        // Merging the default instance is a no-op by definition.
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasNsInfo()) {
          mergeNsInfo(other.getNsInfo());
        }
        // Preserve unrecognized wire fields so re-serialization round-trips.
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
10727    
10728          public final boolean isInitialized() {
10729            if (!hasJid()) {
10730              
10731              return false;
10732            }
10733            if (!hasNsInfo()) {
10734              
10735              return false;
10736            }
10737            if (!getJid().isInitialized()) {
10738              
10739              return false;
10740            }
10741            if (!getNsInfo().isInitialized()) {
10742              
10743              return false;
10744            }
10745            return true;
10746          }
10747    
      /**
       * Parses one message from {@code input} and merges it into this builder.
       * On failure the partially-decoded message (if any) is still merged
       * in the finally block before the exception propagates.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Recover the partially-decoded message attached to the exception.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bitmask: bit 0 = jid, bit 1 = nsInfo.
      private int bitField0_;

      // required .hadoop.hdfs.JournalIdProto jid = 1;
      // Cached value, used only while jidBuilder_ is null; once a
      // sub-builder exists it becomes the single source of truth.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
10787          /**
10788           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10789           */
10790          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10791            if (jidBuilder_ == null) {
10792              if (value == null) {
10793                throw new NullPointerException();
10794              }
10795              jid_ = value;
10796              onChanged();
10797            } else {
10798              jidBuilder_.setMessage(value);
10799            }
10800            bitField0_ |= 0x00000001;
10801            return this;
10802          }
10803          /**
10804           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10805           */
10806          public Builder setJid(
10807              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
10808            if (jidBuilder_ == null) {
10809              jid_ = builderForValue.build();
10810              onChanged();
10811            } else {
10812              jidBuilder_.setMessage(builderForValue.build());
10813            }
10814            bitField0_ |= 0x00000001;
10815            return this;
10816          }
10817          /**
10818           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10819           */
10820          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10821            if (jidBuilder_ == null) {
10822              if (((bitField0_ & 0x00000001) == 0x00000001) &&
10823                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
10824                jid_ =
10825                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
10826              } else {
10827                jid_ = value;
10828              }
10829              onChanged();
10830            } else {
10831              jidBuilder_.mergeFrom(value);
10832            }
10833            bitField0_ |= 0x00000001;
10834            return this;
10835          }
10836          /**
10837           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10838           */
10839          public Builder clearJid() {
10840            if (jidBuilder_ == null) {
10841              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10842              onChanged();
10843            } else {
10844              jidBuilder_.clear();
10845            }
10846            bitField0_ = (bitField0_ & ~0x00000001);
10847            return this;
10848          }
10849          /**
10850           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10851           */
10852          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
10853            bitField0_ |= 0x00000001;
10854            onChanged();
10855            return getJidFieldBuilder().getBuilder();
10856          }
10857          /**
10858           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10859           */
10860          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10861            if (jidBuilder_ != null) {
10862              return jidBuilder_.getMessageOrBuilder();
10863            } else {
10864              return jid_;
10865            }
10866          }
10867          /**
10868           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10869           */
10870          private com.google.protobuf.SingleFieldBuilder<
10871              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
10872              getJidFieldBuilder() {
10873            if (jidBuilder_ == null) {
10874              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10875                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
10876                      jid_,
10877                      getParentForChildren(),
10878                      isClean());
10879              jid_ = null;
10880            }
10881            return jidBuilder_;
10882          }
10883    
      // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
      // Plain storage for the field; superseded by nsInfoBuilder_ once the
      // lazy SingleFieldBuilder has been created (see getNsInfoFieldBuilder()).
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public boolean hasNsInfo() {
        // Has-bit 0x00000002 tracks presence of field 2.
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
        if (nsInfoBuilder_ == null) {
          return nsInfo_;
        } else {
          return nsInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          nsInfo_ = value;
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder setNsInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = builderForValue.build();
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          // Merge into the existing value only when the field is set and not the
          // shared default instance; otherwise replace the reference outright.
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
            nsInfo_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
          } else {
            nsInfo_ = value;
          }
          onChanged();
        } else {
          nsInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder clearNsInfo() {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
          onChanged();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
        // Handing out a mutable sub-builder counts as setting the field.
        bitField0_ |= 0x00000002;
        onChanged();
        return getNsInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
        if (nsInfoBuilder_ != null) {
          return nsInfoBuilder_.getMessageOrBuilder();
        } else {
          return nsInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      // Lazily creates the SingleFieldBuilder for "nsInfo"; after creation the
      // builder owns the value and the plain nsInfo_ reference is nulled out.
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
          getNsInfoFieldBuilder() {
        if (nsInfoBuilder_ == null) {
          nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
                  nsInfo_,
                  getParentForChildren(),
                  isClean());
          nsInfo_ = null;
        }
        return nsInfoBuilder_;
      }
11000    
11001          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatRequestProto)
11002        }
11003    
    // Eagerly builds the singleton default instance when the class is loaded.
    static {
      defaultInstance = new FormatRequestProto(true);
      defaultInstance.initFields();
    }
11008    
11009        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatRequestProto)
11010      }
11011    
  // FormatResponseProto declares no fields, so its OrBuilder interface adds
  // nothing beyond the base MessageOrBuilder contract.
  public interface FormatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
11015      /**
11016       * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11017       */
  public static final class FormatResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements FormatResponseProtoOrBuilder {
    // This message carries no fields; its only state is the unknown-field set.
    // Use FormatResponseProto.newBuilder() to construct.
    private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, created in the static initializer below.
    private static final FormatResponseProto defaultInstance;
    public static FormatResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public FormatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: with no declared fields, every tag is
    // routed to parseUnknownField until end-of-message (tag 0) or end of input.
    private FormatResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on parse failure so the partial message is self-consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
    }

    public static com.google.protobuf.Parser<FormatResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<FormatResponseProto>() {
      public FormatResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FormatResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FormatResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    // Memoized isInitialized result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // No required fields, so the message is always initialized.
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      // Only unknown fields can contribute bytes for this field-less message.
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;

      // With no declared fields, equality reduces to the unknown-field sets.
      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // No sub-message fields, so nothing to eagerly initialize.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
        // Only unknown fields can differ between instances of this message.
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Recover the partially-parsed message so it is merged before rethrow.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatResponseProto)
    }

    static {
      defaultInstance = new FormatResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatResponseProto)
  }
11349    
  // Read-only view of a NewEpochRequestProto: three required fields
  // (jid = 1, nsInfo = 2, epoch = 3), each with a has/get accessor pair and,
  // for the message-typed fields, an OrBuilder accessor.
  public interface NewEpochRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    boolean hasNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();

    // required uint64 epoch = 3;
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    long getEpoch();
  }
11391      /**
11392       * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11393       *
11394       * <pre>
11395       **
11396       * newEpoch()
11397       * </pre>
11398       */
11399      public static final class NewEpochRequestProto extends
11400          com.google.protobuf.GeneratedMessage
11401          implements NewEpochRequestProtoOrBuilder {
    // Use NewEpochRequestProto.newBuilder() to construct.
    private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit path used only for the singleton default instance.
    private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, created in the class's static initializer.
    private static final NewEpochRequestProto defaultInstance;
    public static NewEpochRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public NewEpochRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor.  Reads tags until end-of-message (tag 0),
    // populating jid (tag 10), nsInfo (tag 18), and epoch (tag 24); anything
    // else goes to the unknown-field set.  Note the generator emits the
    // `default:` label before the field cases — legal in Java, since switch
    // case order does not affect dispatch.
    private NewEpochRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // Emitted by the generator; apparently unused here — presumably reserved
      // for repeated-field bookkeeping.  Leave as generated.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (jid): if already seen, merge the new bytes into the
              // existing value via a temporary builder, per protobuf semantics
              // for repeated occurrences of a singular message field.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (nsInfo): same merge-on-repeat handling as jid.
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = nsInfo_.toBuilder();
              }
              nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(nsInfo_);
                nsInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 24: {
              // Field 3 (epoch), varint-encoded uint64.
              bitField0_ |= 0x00000004;
              epoch_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on parse failure so the partial message is self-consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Returns the descriptor for hadoop.hdfs.NewEpochRequestProto; the
    // static descriptor field is defined elsewhere in this generated class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
    }

    // Binds the reflection-based field accessor table to this message class
    // and its Builder; invoked by the GeneratedMessage reflection machinery.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
    }
11501    
    // Singleton parser used by every parseFrom() overload below; it simply
    // delegates to the message's stream-parsing constructor.
    public static com.google.protobuf.Parser<NewEpochRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<NewEpochRequestProto>() {
      public NewEpochRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new NewEpochRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<NewEpochRequestProto> getParserForType() {
      return PARSER;
    }
11516    
    private int bitField0_;  // presence bits: 0x1 = jid, 0x2 = nsInfo, 0x4 = epoch
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
11539    
    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }
11561    
    // required uint64 epoch = 3;
    public static final int EPOCH_FIELD_NUMBER = 3;
    private long epoch_;
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    public long getEpoch() {
      return epoch_;
    }
11577    
    // Resets all fields to their proto defaults; called by the parsing
    // constructor before any input is read.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      epoch_ = 0L;
    }
    // Memoized initialization check: -1 = unknown, 0 = not initialized,
    // 1 = initialized. All three fields are required, and the two message
    // fields must themselves be fully initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNsInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getNsInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
11611    
    // Serializes only the fields whose presence bit is set, in field-number
    // order, followed by any retained unknown fields. getSerializedSize() is
    // called first so the memoized size is populated for nested writes.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, epoch_);
      }
      getUnknownFields().writeTo(output);
    }
11626    
    // Wire size of this message; computed once and cached (-1 = not yet
    // computed). Only present fields and unknown fields contribute.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, epoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
11649    
    // Java serialization hook; delegates to GeneratedMessage's writeReplace.
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
11656    
    // Field-by-field equality: presence flags must match, and each present
    // field's value must be equal; unknown fields are also compared.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
11687    
    // Hash over descriptor, each present field (tagged by field number), and
    // unknown fields; cached in memoizedHashCode (0 = not yet computed).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
11712    
    // Static parse entry points for every supported input form (ByteString,
    // byte[], InputStream, CodedInputStream; plus length-delimited stream
    // variants), with and without an extension registry. All delegate to the
    // shared PARSER singleton above.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
11765    
    // Builder factories: newBuilder() for an empty builder, newBuilder(proto)
    // for one pre-populated from a prototype, toBuilder() to copy this
    // instance, and the parent-aware variant used for nested builders.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
11779        /**
11780         * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11781         *
11782         * <pre>
11783         **
11784         * newEpoch()
11785         * </pre>
11786         */
11787        public static final class Builder extends
11788            com.google.protobuf.GeneratedMessage.Builder<Builder>
11789           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
      // Same descriptor/accessor-table wiring as the enclosing message class,
      // exposed on the Builder for the reflection machinery.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
      }
11801    
      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested-field builders when the runtime flag
      // alwaysUseFieldBuilders is set (otherwise they are created lazily).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
11821    
      // Resets every field to its default and clears all presence bits. Each
      // message field is reset either directly or via its nested builder,
      // depending on which representation is currently active.
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        epoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      // Deep copy of the builder's current state.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
11844    
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
      }
11853    
      // Builds the message, throwing if any required field is unset.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without the initialization check, translating the builder's
      // presence bits into the result's bitField0_ and taking each message
      // field from either the raw field or its nested builder.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (nsInfoBuilder_ == null) {
          result.nsInfo_ = nsInfo_;
        } else {
          result.nsInfo_ = nsInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.epoch_ = epoch_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
11890    
      // Dispatches to the typed merge when possible; otherwise falls back to
      // the generic reflection-based merge in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Merges only fields that are present on 'other'; the default instance
      // is a no-op. Unknown fields are merged as well.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasNsInfo()) {
          mergeNsInfo(other.getNsInfo());
        }
        if (other.hasEpoch()) {
          setEpoch(other.getEpoch());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
11914    
      // Unmemoized variant of the message's isInitialized(): all three
      // required fields must be set, and both sub-messages initialized.
      public final boolean isInitialized() {
        if (!hasJid()) {
          
          return false;
        }
        if (!hasNsInfo()) {
          
          return false;
        }
        if (!hasEpoch()) {
          
          return false;
        }
        if (!getJid().isInitialized()) {
          
          return false;
        }
        if (!getNsInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }
11938    
      // Parses from a stream and merges the result into this builder. On a
      // parse error, any partially parsed message is still merged (in the
      // finally block) before the exception is rethrown.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;  // builder-side presence bits, same layout as the message
11957    
      // required .hadoop.hdfs.JournalIdProto jid = 1;
      // The jid field has two representations: the raw jid_ value, used until
      // getJidFieldBuilder() is first called, and the SingleFieldBuilder,
      // used from then on (jid_ is nulled once the builder takes over).
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          // Merge into the existing value only if one was already set and it
          // is not the shared default instance; otherwise just replace.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;  // builder now owns the value
        }
        return jidBuilder_;
      }
12074    
12075          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
12076          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12077          private com.google.protobuf.SingleFieldBuilder<
12078              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
12079          /**
12080           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12081           */
12082          public boolean hasNsInfo() {
12083            return ((bitField0_ & 0x00000002) == 0x00000002);
12084          }
12085          /**
12086           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12087           */
12088          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
12089            if (nsInfoBuilder_ == null) {
12090              return nsInfo_;
12091            } else {
12092              return nsInfoBuilder_.getMessage();
12093            }
12094          }
12095          /**
12096           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12097           */
12098          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12099            if (nsInfoBuilder_ == null) {
12100              if (value == null) {
12101                throw new NullPointerException();
12102              }
12103              nsInfo_ = value;
12104              onChanged();
12105            } else {
12106              nsInfoBuilder_.setMessage(value);
12107            }
12108            bitField0_ |= 0x00000002;
12109            return this;
12110          }
12111          /**
12112           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12113           */
12114          public Builder setNsInfo(
12115              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
12116            if (nsInfoBuilder_ == null) {
12117              nsInfo_ = builderForValue.build();
12118              onChanged();
12119            } else {
12120              nsInfoBuilder_.setMessage(builderForValue.build());
12121            }
12122            bitField0_ |= 0x00000002;
12123            return this;
12124          }
12125          /**
12126           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12127           */
12128          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12129            if (nsInfoBuilder_ == null) {
12130              if (((bitField0_ & 0x00000002) == 0x00000002) &&
12131                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
12132                nsInfo_ =
12133                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
12134              } else {
12135                nsInfo_ = value;
12136              }
12137              onChanged();
12138            } else {
12139              nsInfoBuilder_.mergeFrom(value);
12140            }
12141            bitField0_ |= 0x00000002;
12142            return this;
12143          }
12144          /**
12145           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12146           */
12147          public Builder clearNsInfo() {
12148            if (nsInfoBuilder_ == null) {
12149              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12150              onChanged();
12151            } else {
12152              nsInfoBuilder_.clear();
12153            }
12154            bitField0_ = (bitField0_ & ~0x00000002);
12155            return this;
12156          }
12157          /**
12158           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12159           */
12160          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
12161            bitField0_ |= 0x00000002;
12162            onChanged();
12163            return getNsInfoFieldBuilder().getBuilder();
12164          }
12165          /**
12166           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12167           */
12168          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
12169            if (nsInfoBuilder_ != null) {
12170              return nsInfoBuilder_.getMessageOrBuilder();
12171            } else {
12172              return nsInfo_;
12173            }
12174          }
12175          /**
12176           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12177           */
12178          private com.google.protobuf.SingleFieldBuilder<
12179              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
12180              getNsInfoFieldBuilder() {
12181            if (nsInfoBuilder_ == null) {
12182              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12183                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
12184                      nsInfo_,
12185                      getParentForChildren(),
12186                      isClean());
12187              nsInfo_ = null;
12188            }
12189            return nsInfoBuilder_;
12190          }
12191    
      // required uint64 epoch = 3;
      private long epoch_ ;
      /**
       * <code>required uint64 epoch = 3;</code>
       */
      public boolean hasEpoch() {
        // Presence of the required epoch field is tracked by bit 0x4 of bitField0_.
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint64 epoch = 3;</code>
       */
      public long getEpoch() {
        return epoch_;
      }
      /**
       * <code>required uint64 epoch = 3;</code>
       */
      public Builder setEpoch(long value) {
        bitField0_ |= 0x00000004;
        epoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 epoch = 3;</code>
       */
      public Builder clearEpoch() {
        bitField0_ = (bitField0_ & ~0x00000004);
        // Reset to the proto3-style default for uint64.
        epoch_ = 0L;
        onChanged();
        return this;
      }
12224    
12225          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochRequestProto)
12226        }
12227    
    static {
      // Eagerly build the shared immutable default instance; the noInit
      // constructor skips field initialization, so initFields() runs here.
      defaultInstance = new NewEpochRequestProto(true);
      defaultInstance.initFields();
    }
12232    
12233        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochRequestProto)
12234      }
12235    
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.NewEpochResponseProto};
   * implemented by both the message class and its Builder.
   */
  public interface NewEpochResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional uint64 lastSegmentTxId = 1;
    /**
     * <code>optional uint64 lastSegmentTxId = 1;</code>
     */
    boolean hasLastSegmentTxId();
    /**
     * <code>optional uint64 lastSegmentTxId = 1;</code>
     */
    long getLastSegmentTxId();
  }
12249      /**
12250       * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12251       */
12252      public static final class NewEpochResponseProto extends
12253          com.google.protobuf.GeneratedMessage
12254          implements NewEpochResponseProtoOrBuilder {
12255        // Use NewEpochResponseProto.newBuilder() to construct.
12256        private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12257          super(builder);
12258          this.unknownFields = builder.getUnknownFields();
12259        }
12260        private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12261    
12262        private static final NewEpochResponseProto defaultInstance;
12263        public static NewEpochResponseProto getDefaultInstance() {
12264          return defaultInstance;
12265        }
12266    
12267        public NewEpochResponseProto getDefaultInstanceForType() {
12268          return defaultInstance;
12269        }
12270    
12271        private final com.google.protobuf.UnknownFieldSet unknownFields;
12272        @java.lang.Override
12273        public final com.google.protobuf.UnknownFieldSet
12274            getUnknownFields() {
12275          return this.unknownFields;
12276        }
12277        private NewEpochResponseProto(
12278            com.google.protobuf.CodedInputStream input,
12279            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12280            throws com.google.protobuf.InvalidProtocolBufferException {
12281          initFields();
12282          int mutable_bitField0_ = 0;
12283          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12284              com.google.protobuf.UnknownFieldSet.newBuilder();
12285          try {
12286            boolean done = false;
12287            while (!done) {
12288              int tag = input.readTag();
12289              switch (tag) {
12290                case 0:
12291                  done = true;
12292                  break;
12293                default: {
12294                  if (!parseUnknownField(input, unknownFields,
12295                                         extensionRegistry, tag)) {
12296                    done = true;
12297                  }
12298                  break;
12299                }
12300                case 8: {
12301                  bitField0_ |= 0x00000001;
12302                  lastSegmentTxId_ = input.readUInt64();
12303                  break;
12304                }
12305              }
12306            }
12307          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12308            throw e.setUnfinishedMessage(this);
12309          } catch (java.io.IOException e) {
12310            throw new com.google.protobuf.InvalidProtocolBufferException(
12311                e.getMessage()).setUnfinishedMessage(this);
12312          } finally {
12313            this.unknownFields = unknownFields.build();
12314            makeExtensionsImmutable();
12315          }
12316        }
12317        public static final com.google.protobuf.Descriptors.Descriptor
12318            getDescriptor() {
12319          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12320        }
12321    
12322        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12323            internalGetFieldAccessorTable() {
12324          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12325              .ensureFieldAccessorsInitialized(
12326                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12327        }
12328    
12329        public static com.google.protobuf.Parser<NewEpochResponseProto> PARSER =
12330            new com.google.protobuf.AbstractParser<NewEpochResponseProto>() {
12331          public NewEpochResponseProto parsePartialFrom(
12332              com.google.protobuf.CodedInputStream input,
12333              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12334              throws com.google.protobuf.InvalidProtocolBufferException {
12335            return new NewEpochResponseProto(input, extensionRegistry);
12336          }
12337        };
12338    
12339        @java.lang.Override
12340        public com.google.protobuf.Parser<NewEpochResponseProto> getParserForType() {
12341          return PARSER;
12342        }
12343    
12344        private int bitField0_;
12345        // optional uint64 lastSegmentTxId = 1;
12346        public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
12347        private long lastSegmentTxId_;
12348        /**
12349         * <code>optional uint64 lastSegmentTxId = 1;</code>
12350         */
12351        public boolean hasLastSegmentTxId() {
12352          return ((bitField0_ & 0x00000001) == 0x00000001);
12353        }
12354        /**
12355         * <code>optional uint64 lastSegmentTxId = 1;</code>
12356         */
12357        public long getLastSegmentTxId() {
12358          return lastSegmentTxId_;
12359        }
12360    
12361        private void initFields() {
12362          lastSegmentTxId_ = 0L;
12363        }
12364        private byte memoizedIsInitialized = -1;
12365        public final boolean isInitialized() {
12366          byte isInitialized = memoizedIsInitialized;
12367          if (isInitialized != -1) return isInitialized == 1;
12368    
12369          memoizedIsInitialized = 1;
12370          return true;
12371        }
12372    
12373        public void writeTo(com.google.protobuf.CodedOutputStream output)
12374                            throws java.io.IOException {
12375          getSerializedSize();
12376          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12377            output.writeUInt64(1, lastSegmentTxId_);
12378          }
12379          getUnknownFields().writeTo(output);
12380        }
12381    
12382        private int memoizedSerializedSize = -1;
12383        public int getSerializedSize() {
12384          int size = memoizedSerializedSize;
12385          if (size != -1) return size;
12386    
12387          size = 0;
12388          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12389            size += com.google.protobuf.CodedOutputStream
12390              .computeUInt64Size(1, lastSegmentTxId_);
12391          }
12392          size += getUnknownFields().getSerializedSize();
12393          memoizedSerializedSize = size;
12394          return size;
12395        }
12396    
12397        private static final long serialVersionUID = 0L;
12398        @java.lang.Override
12399        protected java.lang.Object writeReplace()
12400            throws java.io.ObjectStreamException {
12401          return super.writeReplace();
12402        }
12403    
12404        @java.lang.Override
12405        public boolean equals(final java.lang.Object obj) {
12406          if (obj == this) {
12407           return true;
12408          }
12409          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
12410            return super.equals(obj);
12411          }
12412          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
12413    
12414          boolean result = true;
12415          result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
12416          if (hasLastSegmentTxId()) {
12417            result = result && (getLastSegmentTxId()
12418                == other.getLastSegmentTxId());
12419          }
12420          result = result &&
12421              getUnknownFields().equals(other.getUnknownFields());
12422          return result;
12423        }
12424    
12425        private int memoizedHashCode = 0;
12426        @java.lang.Override
12427        public int hashCode() {
12428          if (memoizedHashCode != 0) {
12429            return memoizedHashCode;
12430          }
12431          int hash = 41;
12432          hash = (19 * hash) + getDescriptorForType().hashCode();
12433          if (hasLastSegmentTxId()) {
12434            hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
12435            hash = (53 * hash) + hashLong(getLastSegmentTxId());
12436          }
12437          hash = (29 * hash) + getUnknownFields().hashCode();
12438          memoizedHashCode = hash;
12439          return hash;
12440        }
12441    
12442        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12443            com.google.protobuf.ByteString data)
12444            throws com.google.protobuf.InvalidProtocolBufferException {
12445          return PARSER.parseFrom(data);
12446        }
12447        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12448            com.google.protobuf.ByteString data,
12449            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12450            throws com.google.protobuf.InvalidProtocolBufferException {
12451          return PARSER.parseFrom(data, extensionRegistry);
12452        }
12453        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
12454            throws com.google.protobuf.InvalidProtocolBufferException {
12455          return PARSER.parseFrom(data);
12456        }
12457        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12458            byte[] data,
12459            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12460            throws com.google.protobuf.InvalidProtocolBufferException {
12461          return PARSER.parseFrom(data, extensionRegistry);
12462        }
12463        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
12464            throws java.io.IOException {
12465          return PARSER.parseFrom(input);
12466        }
12467        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12468            java.io.InputStream input,
12469            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12470            throws java.io.IOException {
12471          return PARSER.parseFrom(input, extensionRegistry);
12472        }
12473        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
12474            throws java.io.IOException {
12475          return PARSER.parseDelimitedFrom(input);
12476        }
12477        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
12478            java.io.InputStream input,
12479            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12480            throws java.io.IOException {
12481          return PARSER.parseDelimitedFrom(input, extensionRegistry);
12482        }
12483        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12484            com.google.protobuf.CodedInputStream input)
12485            throws java.io.IOException {
12486          return PARSER.parseFrom(input);
12487        }
12488        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12489            com.google.protobuf.CodedInputStream input,
12490            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12491            throws java.io.IOException {
12492          return PARSER.parseFrom(input, extensionRegistry);
12493        }
12494    
12495        public static Builder newBuilder() { return Builder.create(); }
12496        public Builder newBuilderForType() { return newBuilder(); }
12497        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
12498          return newBuilder().mergeFrom(prototype);
12499        }
12500        public Builder toBuilder() { return newBuilder(this); }
12501    
12502        @java.lang.Override
12503        protected Builder newBuilderForType(
12504            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12505          Builder builder = new Builder(parent);
12506          return builder;
12507        }
12508        /**
12509         * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12510         */
12511        public static final class Builder extends
12512            com.google.protobuf.GeneratedMessage.Builder<Builder>
12513           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
12514          public static final com.google.protobuf.Descriptors.Descriptor
12515              getDescriptor() {
12516            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12517          }
12518    
12519          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12520              internalGetFieldAccessorTable() {
12521            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12522                .ensureFieldAccessorsInitialized(
12523                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12524          }
12525    
12526          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
12527          private Builder() {
12528            maybeForceBuilderInitialization();
12529          }
12530    
12531          private Builder(
12532              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12533            super(parent);
12534            maybeForceBuilderInitialization();
12535          }
12536          private void maybeForceBuilderInitialization() {
12537            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12538            }
12539          }
12540          private static Builder create() {
12541            return new Builder();
12542          }
12543    
12544          public Builder clear() {
12545            super.clear();
12546            lastSegmentTxId_ = 0L;
12547            bitField0_ = (bitField0_ & ~0x00000001);
12548            return this;
12549          }
12550    
12551          public Builder clone() {
12552            return create().mergeFrom(buildPartial());
12553          }
12554    
12555          public com.google.protobuf.Descriptors.Descriptor
12556              getDescriptorForType() {
12557            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12558          }
12559    
12560          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
12561            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
12562          }
12563    
12564          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
12565            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
12566            if (!result.isInitialized()) {
12567              throw newUninitializedMessageException(result);
12568            }
12569            return result;
12570          }
12571    
12572          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
12573            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
12574            int from_bitField0_ = bitField0_;
12575            int to_bitField0_ = 0;
12576            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12577              to_bitField0_ |= 0x00000001;
12578            }
12579            result.lastSegmentTxId_ = lastSegmentTxId_;
12580            result.bitField0_ = to_bitField0_;
12581            onBuilt();
12582            return result;
12583          }
12584    
12585          public Builder mergeFrom(com.google.protobuf.Message other) {
12586            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
12587              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
12588            } else {
12589              super.mergeFrom(other);
12590              return this;
12591            }
12592          }
12593    
12594          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
12595            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
12596            if (other.hasLastSegmentTxId()) {
12597              setLastSegmentTxId(other.getLastSegmentTxId());
12598            }
12599            this.mergeUnknownFields(other.getUnknownFields());
12600            return this;
12601          }
12602    
12603          public final boolean isInitialized() {
12604            return true;
12605          }
12606    
12607          public Builder mergeFrom(
12608              com.google.protobuf.CodedInputStream input,
12609              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12610              throws java.io.IOException {
12611            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null;
12612            try {
12613              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12614            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12615              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage();
12616              throw e;
12617            } finally {
12618              if (parsedMessage != null) {
12619                mergeFrom(parsedMessage);
12620              }
12621            }
12622            return this;
12623          }
12624          private int bitField0_;
12625    
12626          // optional uint64 lastSegmentTxId = 1;
12627          private long lastSegmentTxId_ ;
12628          /**
12629           * <code>optional uint64 lastSegmentTxId = 1;</code>
12630           */
12631          public boolean hasLastSegmentTxId() {
12632            return ((bitField0_ & 0x00000001) == 0x00000001);
12633          }
12634          /**
12635           * <code>optional uint64 lastSegmentTxId = 1;</code>
12636           */
12637          public long getLastSegmentTxId() {
12638            return lastSegmentTxId_;
12639          }
12640          /**
12641           * <code>optional uint64 lastSegmentTxId = 1;</code>
12642           */
12643          public Builder setLastSegmentTxId(long value) {
12644            bitField0_ |= 0x00000001;
12645            lastSegmentTxId_ = value;
12646            onChanged();
12647            return this;
12648          }
12649          /**
12650           * <code>optional uint64 lastSegmentTxId = 1;</code>
12651           */
12652          public Builder clearLastSegmentTxId() {
12653            bitField0_ = (bitField0_ & ~0x00000001);
12654            lastSegmentTxId_ = 0L;
12655            onChanged();
12656            return this;
12657          }
12658    
12659          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochResponseProto)
12660        }
12661    
12662        static {
12663          defaultInstance = new NewEpochResponseProto(true);
12664          defaultInstance.initFields();
12665        }
12666    
12667        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochResponseProto)
12668      }
12669    
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.GetEditLogManifestRequestProto};
   * implemented by both the message class and its Builder.
   */
  public interface GetEditLogManifestRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required uint64 sinceTxId = 2;
    /**
     * <code>required uint64 sinceTxId = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    boolean hasSinceTxId();
    /**
     * <code>required uint64 sinceTxId = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    long getSinceTxId();

    // optional bool inProgressOk = 4 [default = false];
    /**
     * <code>optional bool inProgressOk = 4 [default = false];</code>
     *
     * <pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
     * </pre>
     */
    boolean hasInProgressOk();
    /**
     * <code>optional bool inProgressOk = 4 [default = false];</code>
     *
     * <pre>
     * Whether or not the client will be reading from the returned streams.
     * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
     * </pre>
     */
    boolean getInProgressOk();
  }
12725      /**
12726       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
12727       *
12728       * <pre>
12729       **
12730       * getEditLogManifest()
12731       * </pre>
12732       */
12733      public static final class GetEditLogManifestRequestProto extends
12734          com.google.protobuf.GeneratedMessage
12735          implements GetEditLogManifestRequestProtoOrBuilder {
    // Use GetEditLogManifestRequestProto.newBuilder() to construct.
    private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only for the shared default instance; skips field initialization.
    private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final GetEditLogManifestRequestProto defaultInstance;
    public static GetEditLogManifestRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public GetEditLogManifestRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor, invoked via PARSER below.
    private GetEditLogManifestRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of the message.
              done = true;
              break;
            default: {
              // Unrecognized tags are preserved in unknownFields for round-tripping.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Tag 10 = field 1, wire type length-delimited: jid.
              // If jid was already seen, merge the duplicate per protobuf rules.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Tag 16 = field 2, wire type varint: sinceTxId.
              bitField0_ |= 0x00000002;
              sinceTxId_ = input.readUInt64();
              break;
            }
            case 32: {
              // Tag 32 = field 4, wire type varint: inProgressOk.
              bitField0_ |= 0x00000004;
              inProgressOk_ = input.readBool();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed, even on error paths.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
12816        public static final com.google.protobuf.Descriptors.Descriptor
12817            getDescriptor() {
12818          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
12819        }
12820    
12821        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12822            internalGetFieldAccessorTable() {
12823          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
12824              .ensureFieldAccessorsInitialized(
12825                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
12826        }
12827    
12828        public static com.google.protobuf.Parser<GetEditLogManifestRequestProto> PARSER =
12829            new com.google.protobuf.AbstractParser<GetEditLogManifestRequestProto>() {
12830          public GetEditLogManifestRequestProto parsePartialFrom(
12831              com.google.protobuf.CodedInputStream input,
12832              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12833              throws com.google.protobuf.InvalidProtocolBufferException {
12834            return new GetEditLogManifestRequestProto(input, extensionRegistry);
12835          }
12836        };
12837    
12838        @java.lang.Override
12839        public com.google.protobuf.Parser<GetEditLogManifestRequestProto> getParserForType() {
12840          return PARSER;
12841        }
12842    
12843        private int bitField0_;
12844        // required .hadoop.hdfs.JournalIdProto jid = 1;
12845        public static final int JID_FIELD_NUMBER = 1;
12846        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
12847        /**
12848         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12849         */
12850        public boolean hasJid() {
12851          return ((bitField0_ & 0x00000001) == 0x00000001);
12852        }
12853        /**
12854         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12855         */
12856        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12857          return jid_;
12858        }
12859        /**
12860         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12861         */
12862        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12863          return jid_;
12864        }
12865    
12866        // required uint64 sinceTxId = 2;
12867        public static final int SINCETXID_FIELD_NUMBER = 2;
12868        private long sinceTxId_;
12869        /**
12870         * <code>required uint64 sinceTxId = 2;</code>
12871         *
12872         * <pre>
12873         * Transaction ID
12874         * </pre>
12875         */
12876        public boolean hasSinceTxId() {
12877          return ((bitField0_ & 0x00000002) == 0x00000002);
12878        }
12879        /**
12880         * <code>required uint64 sinceTxId = 2;</code>
12881         *
12882         * <pre>
12883         * Transaction ID
12884         * </pre>
12885         */
12886        public long getSinceTxId() {
12887          return sinceTxId_;
12888        }
12889    
12890        // optional bool inProgressOk = 4 [default = false];
12891        public static final int INPROGRESSOK_FIELD_NUMBER = 4;
12892        private boolean inProgressOk_;
12893        /**
12894         * <code>optional bool inProgressOk = 4 [default = false];</code>
12895         *
12896         * <pre>
12897         * Whether or not the client will be reading from the returned streams.
12898         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
12899         * </pre>
12900         */
12901        public boolean hasInProgressOk() {
12902          return ((bitField0_ & 0x00000004) == 0x00000004);
12903        }
12904        /**
12905         * <code>optional bool inProgressOk = 4 [default = false];</code>
12906         *
12907         * <pre>
12908         * Whether or not the client will be reading from the returned streams.
12909         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
12910         * </pre>
12911         */
12912        public boolean getInProgressOk() {
12913          return inProgressOk_;
12914        }
12915    
12916        private void initFields() {
12917          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12918          sinceTxId_ = 0L;
12919          inProgressOk_ = false;
12920        }
12921        private byte memoizedIsInitialized = -1;
12922        public final boolean isInitialized() {
12923          byte isInitialized = memoizedIsInitialized;
12924          if (isInitialized != -1) return isInitialized == 1;
12925    
12926          if (!hasJid()) {
12927            memoizedIsInitialized = 0;
12928            return false;
12929          }
12930          if (!hasSinceTxId()) {
12931            memoizedIsInitialized = 0;
12932            return false;
12933          }
12934          if (!getJid().isInitialized()) {
12935            memoizedIsInitialized = 0;
12936            return false;
12937          }
12938          memoizedIsInitialized = 1;
12939          return true;
12940        }
12941    
    /**
     * Serializes the set fields in field-number order (1, 2, 4), followed by
     * any unknown fields preserved from parsing. Emission order must match
     * the wire format, so the code is left exactly as generated.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Populates the memoized size before writing (required by the runtime).
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, sinceTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBool(4, inProgressOk_);
      }
      getUnknownFields().writeTo(output);
    }
12956    
    // Memoized wire size; -1 means "not yet computed".
    private int memoizedSerializedSize = -1;
    /**
     * Computes (and memoizes) the serialized byte size. Must account for
     * exactly the fields that {@link #writeTo} emits, in the same conditions.
     */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, sinceTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(4, inProgressOk_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
12979    
    private static final long serialVersionUID = 0L;
    /**
     * Java-serialization hook; delegates to the base class so messages are
     * serialized via their protobuf form rather than field-by-field.
     */
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
12986    
12987        @java.lang.Override
12988        public boolean equals(final java.lang.Object obj) {
12989          if (obj == this) {
12990           return true;
12991          }
12992          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
12993            return super.equals(obj);
12994          }
12995          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
12996    
12997          boolean result = true;
12998          result = result && (hasJid() == other.hasJid());
12999          if (hasJid()) {
13000            result = result && getJid()
13001                .equals(other.getJid());
13002          }
13003          result = result && (hasSinceTxId() == other.hasSinceTxId());
13004          if (hasSinceTxId()) {
13005            result = result && (getSinceTxId()
13006                == other.getSinceTxId());
13007          }
13008          result = result && (hasInProgressOk() == other.hasInProgressOk());
13009          if (hasInProgressOk()) {
13010            result = result && (getInProgressOk()
13011                == other.getInProgressOk());
13012          }
13013          result = result &&
13014              getUnknownFields().equals(other.getUnknownFields());
13015          return result;
13016        }
13017    
    // Memoized hash; 0 doubles as the "not yet computed" sentinel.
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with {@link #equals}: folds in the descriptor, each
     * present field tagged by its field number, and the unknown fields.
     * The exact arithmetic is preserved so hashes match other generated code.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasSinceTxId()) {
        hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
        // hashLong/hashBoolean are inherited helpers (not defined in this file).
        hash = (53 * hash) + hashLong(getSinceTxId());
      }
      if (hasInProgressOk()) {
        hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getInProgressOk());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
13042    
    // Static parse entry points. All overloads delegate to PARSER; the
    // delimited variants read a leading varint length prefix first.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
13095    
13096        public static Builder newBuilder() { return Builder.create(); }
13097        public Builder newBuilderForType() { return newBuilder(); }
13098        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
13099          return newBuilder().mergeFrom(prototype);
13100        }
13101        public Builder toBuilder() { return newBuilder(this); }
13102    
13103        @java.lang.Override
13104        protected Builder newBuilderForType(
13105            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13106          Builder builder = new Builder(parent);
13107          return builder;
13108        }
13109        /**
13110         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
13111         *
13112         * <pre>
13113         **
13114         * getEditLogManifest()
13115         * </pre>
13116         */
13117        public static final class Builder extends
13118            com.google.protobuf.GeneratedMessage.Builder<Builder>
13119           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
13120          public static final com.google.protobuf.Descriptors.Descriptor
13121              getDescriptor() {
13122            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13123          }
13124    
13125          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13126              internalGetFieldAccessorTable() {
13127            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
13128                .ensureFieldAccessorsInitialized(
13129                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
13130          }
13131    
13132          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
13133          private Builder() {
13134            maybeForceBuilderInitialization();
13135          }
13136    
13137          private Builder(
13138              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13139            super(parent);
13140            maybeForceBuilderInitialization();
13141          }
13142          private void maybeForceBuilderInitialization() {
13143            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13144              getJidFieldBuilder();
13145            }
13146          }
13147          private static Builder create() {
13148            return new Builder();
13149          }
13150    
13151          public Builder clear() {
13152            super.clear();
13153            if (jidBuilder_ == null) {
13154              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13155            } else {
13156              jidBuilder_.clear();
13157            }
13158            bitField0_ = (bitField0_ & ~0x00000001);
13159            sinceTxId_ = 0L;
13160            bitField0_ = (bitField0_ & ~0x00000002);
13161            inProgressOk_ = false;
13162            bitField0_ = (bitField0_ & ~0x00000004);
13163            return this;
13164          }
13165    
13166          public Builder clone() {
13167            return create().mergeFrom(buildPartial());
13168          }
13169    
13170          public com.google.protobuf.Descriptors.Descriptor
13171              getDescriptorForType() {
13172            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13173          }
13174    
13175          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
13176            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
13177          }
13178    
13179          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
13180            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
13181            if (!result.isInitialized()) {
13182              throw newUninitializedMessageException(result);
13183            }
13184            return result;
13185          }
13186    
13187          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
13188            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
13189            int from_bitField0_ = bitField0_;
13190            int to_bitField0_ = 0;
13191            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13192              to_bitField0_ |= 0x00000001;
13193            }
13194            if (jidBuilder_ == null) {
13195              result.jid_ = jid_;
13196            } else {
13197              result.jid_ = jidBuilder_.build();
13198            }
13199            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13200              to_bitField0_ |= 0x00000002;
13201            }
13202            result.sinceTxId_ = sinceTxId_;
13203            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
13204              to_bitField0_ |= 0x00000004;
13205            }
13206            result.inProgressOk_ = inProgressOk_;
13207            result.bitField0_ = to_bitField0_;
13208            onBuilt();
13209            return result;
13210          }
13211    
13212          public Builder mergeFrom(com.google.protobuf.Message other) {
13213            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
13214              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
13215            } else {
13216              super.mergeFrom(other);
13217              return this;
13218            }
13219          }
13220    
13221          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
13222            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
13223            if (other.hasJid()) {
13224              mergeJid(other.getJid());
13225            }
13226            if (other.hasSinceTxId()) {
13227              setSinceTxId(other.getSinceTxId());
13228            }
13229            if (other.hasInProgressOk()) {
13230              setInProgressOk(other.getInProgressOk());
13231            }
13232            this.mergeUnknownFields(other.getUnknownFields());
13233            return this;
13234          }
13235    
13236          public final boolean isInitialized() {
13237            if (!hasJid()) {
13238              
13239              return false;
13240            }
13241            if (!hasSinceTxId()) {
13242              
13243              return false;
13244            }
13245            if (!getJid().isInitialized()) {
13246              
13247              return false;
13248            }
13249            return true;
13250          }
13251    
13252          public Builder mergeFrom(
13253              com.google.protobuf.CodedInputStream input,
13254              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13255              throws java.io.IOException {
13256            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null;
13257            try {
13258              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13259            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13260              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage();
13261              throw e;
13262            } finally {
13263              if (parsedMessage != null) {
13264                mergeFrom(parsedMessage);
13265              }
13266            }
13267            return this;
13268          }
13269          private int bitField0_;
13270    
13271          // required .hadoop.hdfs.JournalIdProto jid = 1;
13272          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13273          private com.google.protobuf.SingleFieldBuilder<
13274              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
13275          /**
13276           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13277           */
13278          public boolean hasJid() {
13279            return ((bitField0_ & 0x00000001) == 0x00000001);
13280          }
13281          /**
13282           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13283           */
13284          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13285            if (jidBuilder_ == null) {
13286              return jid_;
13287            } else {
13288              return jidBuilder_.getMessage();
13289            }
13290          }
13291          /**
13292           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13293           */
13294          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13295            if (jidBuilder_ == null) {
13296              if (value == null) {
13297                throw new NullPointerException();
13298              }
13299              jid_ = value;
13300              onChanged();
13301            } else {
13302              jidBuilder_.setMessage(value);
13303            }
13304            bitField0_ |= 0x00000001;
13305            return this;
13306          }
13307          /**
13308           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13309           */
13310          public Builder setJid(
13311              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
13312            if (jidBuilder_ == null) {
13313              jid_ = builderForValue.build();
13314              onChanged();
13315            } else {
13316              jidBuilder_.setMessage(builderForValue.build());
13317            }
13318            bitField0_ |= 0x00000001;
13319            return this;
13320          }
13321          /**
13322           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13323           */
13324          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13325            if (jidBuilder_ == null) {
13326              if (((bitField0_ & 0x00000001) == 0x00000001) &&
13327                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
13328                jid_ =
13329                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
13330              } else {
13331                jid_ = value;
13332              }
13333              onChanged();
13334            } else {
13335              jidBuilder_.mergeFrom(value);
13336            }
13337            bitField0_ |= 0x00000001;
13338            return this;
13339          }
13340          /**
13341           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13342           */
13343          public Builder clearJid() {
13344            if (jidBuilder_ == null) {
13345              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13346              onChanged();
13347            } else {
13348              jidBuilder_.clear();
13349            }
13350            bitField0_ = (bitField0_ & ~0x00000001);
13351            return this;
13352          }
13353          /**
13354           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13355           */
13356          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
13357            bitField0_ |= 0x00000001;
13358            onChanged();
13359            return getJidFieldBuilder().getBuilder();
13360          }
13361          /**
13362           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13363           */
13364          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13365            if (jidBuilder_ != null) {
13366              return jidBuilder_.getMessageOrBuilder();
13367            } else {
13368              return jid_;
13369            }
13370          }
13371          /**
13372           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13373           */
13374          private com.google.protobuf.SingleFieldBuilder<
13375              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
13376              getJidFieldBuilder() {
13377            if (jidBuilder_ == null) {
13378              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13379                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
13380                      jid_,
13381                      getParentForChildren(),
13382                      isClean());
13383              jid_ = null;
13384            }
13385            return jidBuilder_;
13386          }
13387    
13388          // required uint64 sinceTxId = 2;
13389          private long sinceTxId_ ;
13390          /**
13391           * <code>required uint64 sinceTxId = 2;</code>
13392           *
13393           * <pre>
13394           * Transaction ID
13395           * </pre>
13396           */
13397          public boolean hasSinceTxId() {
13398            return ((bitField0_ & 0x00000002) == 0x00000002);
13399          }
13400          /**
13401           * <code>required uint64 sinceTxId = 2;</code>
13402           *
13403           * <pre>
13404           * Transaction ID
13405           * </pre>
13406           */
13407          public long getSinceTxId() {
13408            return sinceTxId_;
13409          }
13410          /**
13411           * <code>required uint64 sinceTxId = 2;</code>
13412           *
13413           * <pre>
13414           * Transaction ID
13415           * </pre>
13416           */
13417          public Builder setSinceTxId(long value) {
13418            bitField0_ |= 0x00000002;
13419            sinceTxId_ = value;
13420            onChanged();
13421            return this;
13422          }
13423          /**
13424           * <code>required uint64 sinceTxId = 2;</code>
13425           *
13426           * <pre>
13427           * Transaction ID
13428           * </pre>
13429           */
13430          public Builder clearSinceTxId() {
13431            bitField0_ = (bitField0_ & ~0x00000002);
13432            sinceTxId_ = 0L;
13433            onChanged();
13434            return this;
13435          }
13436    
13437          // optional bool inProgressOk = 4 [default = false];
13438          private boolean inProgressOk_ ;
13439          /**
13440           * <code>optional bool inProgressOk = 4 [default = false];</code>
13441           *
13442           * <pre>
13443           * Whether or not the client will be reading from the returned streams.
13444           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13445           * </pre>
13446           */
13447          public boolean hasInProgressOk() {
13448            return ((bitField0_ & 0x00000004) == 0x00000004);
13449          }
13450          /**
13451           * <code>optional bool inProgressOk = 4 [default = false];</code>
13452           *
13453           * <pre>
13454           * Whether or not the client will be reading from the returned streams.
13455           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13456           * </pre>
13457           */
13458          public boolean getInProgressOk() {
13459            return inProgressOk_;
13460          }
13461          /**
13462           * <code>optional bool inProgressOk = 4 [default = false];</code>
13463           *
13464           * <pre>
13465           * Whether or not the client will be reading from the returned streams.
13466           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13467           * </pre>
13468           */
13469          public Builder setInProgressOk(boolean value) {
13470            bitField0_ |= 0x00000004;
13471            inProgressOk_ = value;
13472            onChanged();
13473            return this;
13474          }
13475          /**
13476           * <code>optional bool inProgressOk = 4 [default = false];</code>
13477           *
13478           * <pre>
13479           * Whether or not the client will be reading from the returned streams.
13480           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13481           * </pre>
13482           */
13483          public Builder clearInProgressOk() {
13484            bitField0_ = (bitField0_ & ~0x00000004);
13485            inProgressOk_ = false;
13486            onChanged();
13487            return this;
13488          }
13489    
13490          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13491        }
13492    
13493        static {
13494          defaultInstance = new GetEditLogManifestRequestProto(true);
13495          defaultInstance.initFields();
13496        }
13497    
13498        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13499      }
13500    
  /**
   * Read-only accessor contract shared by {@code GetEditLogManifestResponseProto}
   * and its {@code Builder}. Generated from QJournalProtocol.proto; do not edit
   * by hand.
   */
  public interface GetEditLogManifestResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    boolean hasManifest();
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();

    // required uint32 httpPort = 2;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    boolean hasHttpPort();
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    int getHttpPort();
  }
13528      /**
13529       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
13530       */
13531      public static final class GetEditLogManifestResponseProto extends
13532          com.google.protobuf.GeneratedMessage
13533          implements GetEditLogManifestResponseProtoOrBuilder {
13534        // Use GetEditLogManifestResponseProto.newBuilder() to construct.
13535        private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13536          super(builder);
13537          this.unknownFields = builder.getUnknownFields();
13538        }
13539        private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13540    
13541        private static final GetEditLogManifestResponseProto defaultInstance;
13542        public static GetEditLogManifestResponseProto getDefaultInstance() {
13543          return defaultInstance;
13544        }
13545    
13546        public GetEditLogManifestResponseProto getDefaultInstanceForType() {
13547          return defaultInstance;
13548        }
13549    
13550        private final com.google.protobuf.UnknownFieldSet unknownFields;
13551        @java.lang.Override
13552        public final com.google.protobuf.UnknownFieldSet
13553            getUnknownFields() {
13554          return this.unknownFields;
13555        }
13556        private GetEditLogManifestResponseProto(
13557            com.google.protobuf.CodedInputStream input,
13558            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13559            throws com.google.protobuf.InvalidProtocolBufferException {
13560          initFields();
13561          int mutable_bitField0_ = 0;
13562          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13563              com.google.protobuf.UnknownFieldSet.newBuilder();
13564          try {
13565            boolean done = false;
13566            while (!done) {
13567              int tag = input.readTag();
13568              switch (tag) {
13569                case 0:
13570                  done = true;
13571                  break;
13572                default: {
13573                  if (!parseUnknownField(input, unknownFields,
13574                                         extensionRegistry, tag)) {
13575                    done = true;
13576                  }
13577                  break;
13578                }
13579                case 10: {
13580                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = null;
13581                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
13582                    subBuilder = manifest_.toBuilder();
13583                  }
13584                  manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry);
13585                  if (subBuilder != null) {
13586                    subBuilder.mergeFrom(manifest_);
13587                    manifest_ = subBuilder.buildPartial();
13588                  }
13589                  bitField0_ |= 0x00000001;
13590                  break;
13591                }
13592                case 16: {
13593                  bitField0_ |= 0x00000002;
13594                  httpPort_ = input.readUInt32();
13595                  break;
13596                }
13597              }
13598            }
13599          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13600            throw e.setUnfinishedMessage(this);
13601          } catch (java.io.IOException e) {
13602            throw new com.google.protobuf.InvalidProtocolBufferException(
13603                e.getMessage()).setUnfinishedMessage(this);
13604          } finally {
13605            this.unknownFields = unknownFields.build();
13606            makeExtensionsImmutable();
13607          }
13608        }
13609        public static final com.google.protobuf.Descriptors.Descriptor
13610            getDescriptor() {
13611          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13612        }
13613    
13614        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13615            internalGetFieldAccessorTable() {
13616          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
13617              .ensureFieldAccessorsInitialized(
13618                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
13619        }
13620    
13621        public static com.google.protobuf.Parser<GetEditLogManifestResponseProto> PARSER =
13622            new com.google.protobuf.AbstractParser<GetEditLogManifestResponseProto>() {
13623          public GetEditLogManifestResponseProto parsePartialFrom(
13624              com.google.protobuf.CodedInputStream input,
13625              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13626              throws com.google.protobuf.InvalidProtocolBufferException {
13627            return new GetEditLogManifestResponseProto(input, extensionRegistry);
13628          }
13629        };
13630    
13631        @java.lang.Override
13632        public com.google.protobuf.Parser<GetEditLogManifestResponseProto> getParserForType() {
13633          return PARSER;
13634        }
13635    
13636        private int bitField0_;
13637        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13638        public static final int MANIFEST_FIELD_NUMBER = 1;
13639        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
13640        /**
13641         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13642         */
13643        public boolean hasManifest() {
13644          return ((bitField0_ & 0x00000001) == 0x00000001);
13645        }
13646        /**
13647         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13648         */
13649        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
13650          return manifest_;
13651        }
13652        /**
13653         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13654         */
13655        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
13656          return manifest_;
13657        }
13658    
13659        // required uint32 httpPort = 2;
13660        public static final int HTTPPORT_FIELD_NUMBER = 2;
13661        private int httpPort_;
13662        /**
13663         * <code>required uint32 httpPort = 2;</code>
13664         */
13665        public boolean hasHttpPort() {
13666          return ((bitField0_ & 0x00000002) == 0x00000002);
13667        }
13668        /**
13669         * <code>required uint32 httpPort = 2;</code>
13670         */
13671        public int getHttpPort() {
13672          return httpPort_;
13673        }
13674    
13675        private void initFields() {
13676          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13677          httpPort_ = 0;
13678        }
13679        private byte memoizedIsInitialized = -1;
13680        public final boolean isInitialized() {
13681          byte isInitialized = memoizedIsInitialized;
13682          if (isInitialized != -1) return isInitialized == 1;
13683    
13684          if (!hasManifest()) {
13685            memoizedIsInitialized = 0;
13686            return false;
13687          }
13688          if (!hasHttpPort()) {
13689            memoizedIsInitialized = 0;
13690            return false;
13691          }
13692          if (!getManifest().isInitialized()) {
13693            memoizedIsInitialized = 0;
13694            return false;
13695          }
13696          memoizedIsInitialized = 1;
13697          return true;
13698        }
13699    
13700        public void writeTo(com.google.protobuf.CodedOutputStream output)
13701                            throws java.io.IOException {
13702          getSerializedSize();
13703          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13704            output.writeMessage(1, manifest_);
13705          }
13706          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13707            output.writeUInt32(2, httpPort_);
13708          }
13709          getUnknownFields().writeTo(output);
13710        }
13711    
13712        private int memoizedSerializedSize = -1;
13713        public int getSerializedSize() {
13714          int size = memoizedSerializedSize;
13715          if (size != -1) return size;
13716    
13717          size = 0;
13718          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13719            size += com.google.protobuf.CodedOutputStream
13720              .computeMessageSize(1, manifest_);
13721          }
13722          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13723            size += com.google.protobuf.CodedOutputStream
13724              .computeUInt32Size(2, httpPort_);
13725          }
13726          size += getUnknownFields().getSerializedSize();
13727          memoizedSerializedSize = size;
13728          return size;
13729        }
13730    
13731        private static final long serialVersionUID = 0L;
13732        @java.lang.Override
13733        protected java.lang.Object writeReplace()
13734            throws java.io.ObjectStreamException {
13735          return super.writeReplace();
13736        }
13737    
13738        @java.lang.Override
13739        public boolean equals(final java.lang.Object obj) {
13740          if (obj == this) {
13741           return true;
13742          }
13743          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
13744            return super.equals(obj);
13745          }
13746          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
13747    
13748          boolean result = true;
13749          result = result && (hasManifest() == other.hasManifest());
13750          if (hasManifest()) {
13751            result = result && getManifest()
13752                .equals(other.getManifest());
13753          }
13754          result = result && (hasHttpPort() == other.hasHttpPort());
13755          if (hasHttpPort()) {
13756            result = result && (getHttpPort()
13757                == other.getHttpPort());
13758          }
13759          result = result &&
13760              getUnknownFields().equals(other.getUnknownFields());
13761          return result;
13762        }
13763    
13764        private int memoizedHashCode = 0;
13765        @java.lang.Override
13766        public int hashCode() {
13767          if (memoizedHashCode != 0) {
13768            return memoizedHashCode;
13769          }
13770          int hash = 41;
13771          hash = (19 * hash) + getDescriptorForType().hashCode();
13772          if (hasManifest()) {
13773            hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
13774            hash = (53 * hash) + getManifest().hashCode();
13775          }
13776          if (hasHttpPort()) {
13777            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
13778            hash = (53 * hash) + getHttpPort();
13779          }
13780          hash = (29 * hash) + getUnknownFields().hashCode();
13781          memoizedHashCode = hash;
13782          return hash;
13783        }
13784    
13785        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13786            com.google.protobuf.ByteString data)
13787            throws com.google.protobuf.InvalidProtocolBufferException {
13788          return PARSER.parseFrom(data);
13789        }
13790        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13791            com.google.protobuf.ByteString data,
13792            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13793            throws com.google.protobuf.InvalidProtocolBufferException {
13794          return PARSER.parseFrom(data, extensionRegistry);
13795        }
13796        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
13797            throws com.google.protobuf.InvalidProtocolBufferException {
13798          return PARSER.parseFrom(data);
13799        }
13800        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13801            byte[] data,
13802            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13803            throws com.google.protobuf.InvalidProtocolBufferException {
13804          return PARSER.parseFrom(data, extensionRegistry);
13805        }
13806        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
13807            throws java.io.IOException {
13808          return PARSER.parseFrom(input);
13809        }
13810        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13811            java.io.InputStream input,
13812            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13813            throws java.io.IOException {
13814          return PARSER.parseFrom(input, extensionRegistry);
13815        }
13816        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
13817            throws java.io.IOException {
13818          return PARSER.parseDelimitedFrom(input);
13819        }
13820        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
13821            java.io.InputStream input,
13822            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13823            throws java.io.IOException {
13824          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13825        }
13826        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13827            com.google.protobuf.CodedInputStream input)
13828            throws java.io.IOException {
13829          return PARSER.parseFrom(input);
13830        }
13831        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13832            com.google.protobuf.CodedInputStream input,
13833            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13834            throws java.io.IOException {
13835          return PARSER.parseFrom(input, extensionRegistry);
13836        }
13837    
13838        public static Builder newBuilder() { return Builder.create(); }
13839        public Builder newBuilderForType() { return newBuilder(); }
13840        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
13841          return newBuilder().mergeFrom(prototype);
13842        }
13843        public Builder toBuilder() { return newBuilder(this); }
13844    
13845        @java.lang.Override
13846        protected Builder newBuilderForType(
13847            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13848          Builder builder = new Builder(parent);
13849          return builder;
13850        }
13851        /**
13852         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
13853         */
13854        public static final class Builder extends
13855            com.google.protobuf.GeneratedMessage.Builder<Builder>
13856           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
13857          public static final com.google.protobuf.Descriptors.Descriptor
13858              getDescriptor() {
13859            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13860          }
13861    
13862          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13863              internalGetFieldAccessorTable() {
13864            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
13865                .ensureFieldAccessorsInitialized(
13866                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
13867          }
13868    
13869          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
13870          private Builder() {
13871            maybeForceBuilderInitialization();
13872          }
13873    
13874          private Builder(
13875              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13876            super(parent);
13877            maybeForceBuilderInitialization();
13878          }
13879          private void maybeForceBuilderInitialization() {
13880            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13881              getManifestFieldBuilder();
13882            }
13883          }
13884          private static Builder create() {
13885            return new Builder();
13886          }
13887    
13888          public Builder clear() {
13889            super.clear();
13890            if (manifestBuilder_ == null) {
13891              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13892            } else {
13893              manifestBuilder_.clear();
13894            }
13895            bitField0_ = (bitField0_ & ~0x00000001);
13896            httpPort_ = 0;
13897            bitField0_ = (bitField0_ & ~0x00000002);
13898            return this;
13899          }
13900    
13901          public Builder clone() {
13902            return create().mergeFrom(buildPartial());
13903          }
13904    
13905          public com.google.protobuf.Descriptors.Descriptor
13906              getDescriptorForType() {
13907            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13908          }
13909    
13910          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
13911            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
13912          }
13913    
13914          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
13915            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
13916            if (!result.isInitialized()) {
13917              throw newUninitializedMessageException(result);
13918            }
13919            return result;
13920          }
13921    
13922          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
13923            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
13924            int from_bitField0_ = bitField0_;
13925            int to_bitField0_ = 0;
13926            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13927              to_bitField0_ |= 0x00000001;
13928            }
13929            if (manifestBuilder_ == null) {
13930              result.manifest_ = manifest_;
13931            } else {
13932              result.manifest_ = manifestBuilder_.build();
13933            }
13934            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13935              to_bitField0_ |= 0x00000002;
13936            }
13937            result.httpPort_ = httpPort_;
13938            result.bitField0_ = to_bitField0_;
13939            onBuilt();
13940            return result;
13941          }
13942    
13943          public Builder mergeFrom(com.google.protobuf.Message other) {
13944            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
13945              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
13946            } else {
13947              super.mergeFrom(other);
13948              return this;
13949            }
13950          }
13951    
13952          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
13953            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
13954            if (other.hasManifest()) {
13955              mergeManifest(other.getManifest());
13956            }
13957            if (other.hasHttpPort()) {
13958              setHttpPort(other.getHttpPort());
13959            }
13960            this.mergeUnknownFields(other.getUnknownFields());
13961            return this;
13962          }
13963    
13964          public final boolean isInitialized() {
13965            if (!hasManifest()) {
13966              
13967              return false;
13968            }
13969            if (!hasHttpPort()) {
13970              
13971              return false;
13972            }
13973            if (!getManifest().isInitialized()) {
13974              
13975              return false;
13976            }
13977            return true;
13978          }
13979    
13980          public Builder mergeFrom(
13981              com.google.protobuf.CodedInputStream input,
13982              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13983              throws java.io.IOException {
13984            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null;
13985            try {
13986              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13987            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13988              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage();
13989              throw e;
13990            } finally {
13991              if (parsedMessage != null) {
13992                mergeFrom(parsedMessage);
13993              }
13994            }
13995            return this;
13996          }
13997          private int bitField0_;
13998    
13999          // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
14000          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14001          private com.google.protobuf.SingleFieldBuilder<
14002              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
14003          /**
14004           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14005           */
14006          public boolean hasManifest() {
14007            return ((bitField0_ & 0x00000001) == 0x00000001);
14008          }
14009          /**
14010           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14011           */
14012          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
14013            if (manifestBuilder_ == null) {
14014              return manifest_;
14015            } else {
14016              return manifestBuilder_.getMessage();
14017            }
14018          }
14019          /**
14020           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14021           */
14022          public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14023            if (manifestBuilder_ == null) {
14024              if (value == null) {
14025                throw new NullPointerException();
14026              }
14027              manifest_ = value;
14028              onChanged();
14029            } else {
14030              manifestBuilder_.setMessage(value);
14031            }
14032            bitField0_ |= 0x00000001;
14033            return this;
14034          }
14035          /**
14036           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14037           */
14038          public Builder setManifest(
14039              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
14040            if (manifestBuilder_ == null) {
14041              manifest_ = builderForValue.build();
14042              onChanged();
14043            } else {
14044              manifestBuilder_.setMessage(builderForValue.build());
14045            }
14046            bitField0_ |= 0x00000001;
14047            return this;
14048          }
14049          /**
14050           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14051           */
14052          public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14053            if (manifestBuilder_ == null) {
14054              if (((bitField0_ & 0x00000001) == 0x00000001) &&
14055                  manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
14056                manifest_ =
14057                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
14058              } else {
14059                manifest_ = value;
14060              }
14061              onChanged();
14062            } else {
14063              manifestBuilder_.mergeFrom(value);
14064            }
14065            bitField0_ |= 0x00000001;
14066            return this;
14067          }
14068          /**
14069           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14070           */
14071          public Builder clearManifest() {
14072            if (manifestBuilder_ == null) {
14073              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14074              onChanged();
14075            } else {
14076              manifestBuilder_.clear();
14077            }
14078            bitField0_ = (bitField0_ & ~0x00000001);
14079            return this;
14080          }
14081          /**
14082           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14083           */
14084          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
14085            bitField0_ |= 0x00000001;
14086            onChanged();
14087            return getManifestFieldBuilder().getBuilder();
14088          }
14089          /**
14090           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14091           */
14092          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
14093            if (manifestBuilder_ != null) {
14094              return manifestBuilder_.getMessageOrBuilder();
14095            } else {
14096              return manifest_;
14097            }
14098          }
14099          /**
14100           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14101           */
14102          private com.google.protobuf.SingleFieldBuilder<
14103              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
14104              getManifestFieldBuilder() {
14105            if (manifestBuilder_ == null) {
14106              manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14107                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
14108                      manifest_,
14109                      getParentForChildren(),
14110                      isClean());
14111              manifest_ = null;
14112            }
14113            return manifestBuilder_;
14114          }
14115    
        // required uint32 httpPort = 2;
        // Scalar field: presence tracked via bit 1 of bitField0_; every
        // mutation calls onChanged() to invalidate any cached parent state.
        private int httpPort_ ;
        /**
         * <code>required uint32 httpPort = 2;</code>
         */
        public boolean hasHttpPort() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>required uint32 httpPort = 2;</code>
         */
        public int getHttpPort() {
          return httpPort_;
        }
        /**
         * <code>required uint32 httpPort = 2;</code>
         */
        public Builder setHttpPort(int value) {
          bitField0_ |= 0x00000002;
          httpPort_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>required uint32 httpPort = 2;</code>
         */
        public Builder clearHttpPort() {
          bitField0_ = (bitField0_ & ~0x00000002);
          httpPort_ = 0;
          onChanged();
          return this;
        }
14148    
14149          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14150        }
14151    
    // Creates the singleton default instance once at class-load time; the
    // private (boolean) constructor skips normal builder-based construction.
    static {
      defaultInstance = new GetEditLogManifestResponseProto(true);
      defaultInstance.initFields();
    }
14156    
14157        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14158      }
14159    
  /**
   * Read-only view shared by {@code PrepareRecoveryRequestProto} and its
   * {@code Builder}: presence checks and getters for the two required fields
   * (reqInfo and segmentTxId).
   */
  public interface PrepareRecoveryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 segmentTxId = 2;
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    boolean hasSegmentTxId();
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    long getSegmentTxId();
  }
14187      /**
14188       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14189       *
14190       * <pre>
14191       **
14192       * prepareRecovery()
14193       * </pre>
14194       */
14195      public static final class PrepareRecoveryRequestProto extends
14196          com.google.protobuf.GeneratedMessage
14197          implements PrepareRecoveryRequestProtoOrBuilder {
    // Use PrepareRecoveryRequestProto.newBuilder() to construct.
    private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Builds the singleton default instance; the boolean only disambiguates
    // the overload and is otherwise unused.
    private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14204    
    // Immutable singleton with all fields at proto defaults; assigned in the
    // class's static initializer (defined later in this class).
    private static final PrepareRecoveryRequestProto defaultInstance;
    public static PrepareRecoveryRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public PrepareRecoveryRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
14213    
    // Fields that arrived on the wire but are not declared in this message's
    // schema; preserved so reserialization round-trips them.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: consumes tag/value pairs until EOF
    // (tag 0) or an unparseable tag.  Unrecognized fields are captured in
    // unknownFields, and on failure the partially parsed message is attached
    // to the thrown InvalidProtocolBufferException.
    private PrepareRecoveryRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE: protoc emits the default arm before the field cases; Java
          // switch semantics make the arm order irrelevant.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (reqInfo), wire type 2: if the field was already seen,
              // merge the new payload into the existing value per proto rules.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (segmentTxId), wire type 0 (varint).
              bitField0_ |= 0x00000002;
              segmentTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was read, even on error paths.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support: descriptor and field-accessor table generated for
    // hadoop.hdfs.PrepareRecoveryRequestProto (defined at the end of the file).
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
    }
14284    
14285        public static com.google.protobuf.Parser<PrepareRecoveryRequestProto> PARSER =
14286            new com.google.protobuf.AbstractParser<PrepareRecoveryRequestProto>() {
14287          public PrepareRecoveryRequestProto parsePartialFrom(
14288              com.google.protobuf.CodedInputStream input,
14289              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14290              throws com.google.protobuf.InvalidProtocolBufferException {
14291            return new PrepareRecoveryRequestProto(input, extensionRegistry);
14292          }
14293        };
14294    
    // All instances share the single static PARSER.
    @java.lang.Override
    public com.google.protobuf.Parser<PrepareRecoveryRequestProto> getParserForType() {
      return PARSER;
    }
14299    
    // Presence bits for the message's fields: bit 0 = reqInfo,
    // bit 1 = segmentTxId.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
14322    
    // required uint64 segmentTxId = 2;
    // First transaction id of the log segment being recovered; stored as a
    // Java long (uint64 on the wire).
    public static final int SEGMENTTXID_FIELD_NUMBER = 2;
    private long segmentTxId_;
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    public boolean hasSegmentTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    public long getSegmentTxId() {
      return segmentTxId_;
    }
14338    
    // Resets both fields to their proto defaults; called before parsing and
    // when constructing the default instance.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      segmentTxId_ = 0L;
    }
    // Memoized required-fields check: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are 'required', and reqInfo must itself be initialized.
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
14363    
    // Serializes only the fields whose presence bits are set, then any
    // unknown fields.  getSerializedSize() is called first for its side
    // effect of populating the memoized size used by the output stream.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, segmentTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 means not yet computed.  Safe to memoize because
    // the message is immutable once constructed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, segmentTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
14394    
    // Java serialization is delegated to GeneratedMessage's writeReplace(),
    // which substitutes a proto-encoded serialized form.
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
14401    
    // Value equality: same presence bits, equal field values, and equal
    // unknown-field sets.  Consistent with hashCode() below.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasSegmentTxId() == other.hasSegmentTxId());
      if (hasSegmentTxId()) {
        result = result && (getSegmentTxId()
            == other.getSegmentTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
14427    
    // Memoized hash (0 = not yet computed); mixes in the field number and
    // value of each present field.  hashLong is a helper defined elsewhere
    // in this generated file.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasSegmentTxId()) {
        hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
14448    
    // Static parsing entry points — all delegate to the shared PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
14501    
    // Builder factories: newBuilder(prototype) pre-populates the builder by
    // merging from an existing message; toBuilder() does the same for 'this'.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Internal factory used by parent builders to create a child builder
    // wired back to them for change notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
14515        /**
14516         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14517         *
14518         * <pre>
14519         **
14520         * prepareRecovery()
14521         * </pre>
14522         */
14523        public static final class Builder extends
14524            com.google.protobuf.GeneratedMessage.Builder<Builder>
14525           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
14526          public static final com.google.protobuf.Descriptors.Descriptor
14527              getDescriptor() {
14528            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14529          }
14530    
14531          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14532              internalGetFieldAccessorTable() {
14533            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
14534                .ensureFieldAccessorsInitialized(
14535                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
14536          }
14537    
14538          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
14539          private Builder() {
14540            maybeForceBuilderInitialization();
14541          }
14542    
14543          private Builder(
14544              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14545            super(parent);
14546            maybeForceBuilderInitialization();
14547          }
14548          private void maybeForceBuilderInitialization() {
14549            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14550              getReqInfoFieldBuilder();
14551            }
14552          }
14553          private static Builder create() {
14554            return new Builder();
14555          }
14556    
14557          public Builder clear() {
14558            super.clear();
14559            if (reqInfoBuilder_ == null) {
14560              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14561            } else {
14562              reqInfoBuilder_.clear();
14563            }
14564            bitField0_ = (bitField0_ & ~0x00000001);
14565            segmentTxId_ = 0L;
14566            bitField0_ = (bitField0_ & ~0x00000002);
14567            return this;
14568          }
14569    
14570          public Builder clone() {
14571            return create().mergeFrom(buildPartial());
14572          }
14573    
14574          public com.google.protobuf.Descriptors.Descriptor
14575              getDescriptorForType() {
14576            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14577          }
14578    
14579          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
14580            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
14581          }
14582    
14583          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
14584            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
14585            if (!result.isInitialized()) {
14586              throw newUninitializedMessageException(result);
14587            }
14588            return result;
14589          }
14590    
14591          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
14592            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
14593            int from_bitField0_ = bitField0_;
14594            int to_bitField0_ = 0;
14595            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
14596              to_bitField0_ |= 0x00000001;
14597            }
14598            if (reqInfoBuilder_ == null) {
14599              result.reqInfo_ = reqInfo_;
14600            } else {
14601              result.reqInfo_ = reqInfoBuilder_.build();
14602            }
14603            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
14604              to_bitField0_ |= 0x00000002;
14605            }
14606            result.segmentTxId_ = segmentTxId_;
14607            result.bitField0_ = to_bitField0_;
14608            onBuilt();
14609            return result;
14610          }
14611    
14612          public Builder mergeFrom(com.google.protobuf.Message other) {
14613            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
14614              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
14615            } else {
14616              super.mergeFrom(other);
14617              return this;
14618            }
14619          }
14620    
14621          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
14622            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
14623            if (other.hasReqInfo()) {
14624              mergeReqInfo(other.getReqInfo());
14625            }
14626            if (other.hasSegmentTxId()) {
14627              setSegmentTxId(other.getSegmentTxId());
14628            }
14629            this.mergeUnknownFields(other.getUnknownFields());
14630            return this;
14631          }
14632    
14633          public final boolean isInitialized() {
14634            if (!hasReqInfo()) {
14635              
14636              return false;
14637            }
14638            if (!hasSegmentTxId()) {
14639              
14640              return false;
14641            }
14642            if (!getReqInfo().isInitialized()) {
14643              
14644              return false;
14645            }
14646            return true;
14647          }
14648    
14649          public Builder mergeFrom(
14650              com.google.protobuf.CodedInputStream input,
14651              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14652              throws java.io.IOException {
14653            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null;
14654            try {
14655              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
14656            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14657              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage();
14658              throw e;
14659            } finally {
14660              if (parsedMessage != null) {
14661                mergeFrom(parsedMessage);
14662              }
14663            }
14664            return this;
14665          }
14666          private int bitField0_;
14667    
14668          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14669          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14670          private com.google.protobuf.SingleFieldBuilder<
14671              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
14672          /**
14673           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14674           */
14675          public boolean hasReqInfo() {
14676            return ((bitField0_ & 0x00000001) == 0x00000001);
14677          }
14678          /**
14679           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14680           */
14681          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
14682            if (reqInfoBuilder_ == null) {
14683              return reqInfo_;
14684            } else {
14685              return reqInfoBuilder_.getMessage();
14686            }
14687          }
14688          /**
14689           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14690           */
14691          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
14692            if (reqInfoBuilder_ == null) {
14693              if (value == null) {
14694                throw new NullPointerException();
14695              }
14696              reqInfo_ = value;
14697              onChanged();
14698            } else {
14699              reqInfoBuilder_.setMessage(value);
14700            }
14701            bitField0_ |= 0x00000001;
14702            return this;
14703          }
14704          /**
14705           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14706           */
14707          public Builder setReqInfo(
14708              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
14709            if (reqInfoBuilder_ == null) {
14710              reqInfo_ = builderForValue.build();
14711              onChanged();
14712            } else {
14713              reqInfoBuilder_.setMessage(builderForValue.build());
14714            }
14715            bitField0_ |= 0x00000001;
14716            return this;
14717          }
14718          /**
14719           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14720           */
14721          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
14722            if (reqInfoBuilder_ == null) {
14723              if (((bitField0_ & 0x00000001) == 0x00000001) &&
14724                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
14725                reqInfo_ =
14726                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
14727              } else {
14728                reqInfo_ = value;
14729              }
14730              onChanged();
14731            } else {
14732              reqInfoBuilder_.mergeFrom(value);
14733            }
14734            bitField0_ |= 0x00000001;
14735            return this;
14736          }
14737          /**
14738           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14739           */
14740          public Builder clearReqInfo() {
14741            if (reqInfoBuilder_ == null) {
14742              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14743              onChanged();
14744            } else {
14745              reqInfoBuilder_.clear();
14746            }
14747            bitField0_ = (bitField0_ & ~0x00000001);
14748            return this;
14749          }
14750          /**
14751           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14752           */
14753          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
14754            bitField0_ |= 0x00000001;
14755            onChanged();
14756            return getReqInfoFieldBuilder().getBuilder();
14757          }
14758          /**
14759           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14760           */
14761          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
14762            if (reqInfoBuilder_ != null) {
14763              return reqInfoBuilder_.getMessageOrBuilder();
14764            } else {
14765              return reqInfo_;
14766            }
14767          }
14768          /**
14769           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14770           */
14771          private com.google.protobuf.SingleFieldBuilder<
14772              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
14773              getReqInfoFieldBuilder() {
14774            if (reqInfoBuilder_ == null) {
14775              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14776                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
14777                      reqInfo_,
14778                      getParentForChildren(),
14779                      isClean());
14780              reqInfo_ = null;
14781            }
14782            return reqInfoBuilder_;
14783          }
14784    
      // required uint64 segmentTxId = 2;
      // Presence tracked by bit 0x2 of bitField0_.
      private long segmentTxId_ ;
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public boolean hasSegmentTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public long getSegmentTxId() {
        return segmentTxId_;
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       *
       * <p>Sets the value and its presence bit, then notifies the parent.
       */
      public Builder setSegmentTxId(long value) {
        bitField0_ |= 0x00000002;
        segmentTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       *
       * <p>Clears the presence bit and restores the proto default (0).
       */
      public Builder clearSegmentTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        segmentTxId_ = 0L;
        onChanged();
        return this;
      }
14817    
14818          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
14819        }
14820    
    // Eagerly builds the shared immutable default instance returned by
    // getDefaultInstance(); initFields() sets all fields to proto defaults.
    static {
      defaultInstance = new PrepareRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
14825    
14826        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
14827      }
14828    
  /**
   * Read-only accessor contract for {@code hadoop.hdfs.PrepareRecoveryResponseProto},
   * implemented by both the immutable message and its Builder.
   */
  public interface PrepareRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    boolean hasSegmentState();
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();

    // optional uint64 acceptedInEpoch = 2;
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    boolean hasAcceptedInEpoch();
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    long getAcceptedInEpoch();

    // required uint64 lastWriterEpoch = 3;
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    boolean hasLastWriterEpoch();
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    long getLastWriterEpoch();

    // optional uint64 lastCommittedTxId = 4;
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    boolean hasLastCommittedTxId();
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    long getLastCommittedTxId();
  }
14888      /**
14889       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
14890       */
14891      public static final class PrepareRecoveryResponseProto extends
14892          com.google.protobuf.GeneratedMessage
14893          implements PrepareRecoveryResponseProtoOrBuilder {
    // Use PrepareRecoveryResponseProto.newBuilder() to construct.
    private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Bare constructor for the singleton default instance; field defaults are
    // applied separately via initFields() (presumably from the class's static
    // initializer, as with the other messages in this file).
    private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared immutable default instance for this message type.
    private static final PrepareRecoveryResponseProto defaultInstance;
    public static PrepareRecoveryResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public PrepareRecoveryResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Unrecognized fields captured at parse time, preserved for reserialization.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Parses one message from the wire format on {@code input}. Unrecognized
     * fields are accumulated into {@link #unknownFields}. Invoked only by
     * {@link #PARSER}.
     *
     * @throws com.google.protobuf.InvalidProtocolBufferException if the input
     *         is malformed; the partially-parsed message is attached to it
     */
    private PrepareRecoveryResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of stream.
              done = true;
              break;
            default: {
              // Unknown tag: stash it, or stop at an end-group marker.
              // (Placing "default" before the cases is legal Java; protoc
              // emits it this way.)
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (segmentState, length-delimited): a repeated
              // occurrence is merged into the previously parsed value.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = segmentState_.toBuilder();
              }
              segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(segmentState_);
                segmentState_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (acceptedInEpoch, varint).
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
            case 24: {
              // Field 3 (lastWriterEpoch, varint).
              bitField0_ |= 0x00000004;
              lastWriterEpoch_ = input.readUInt64();
              break;
            }
            case 32: {
              // Field 4 (lastCommittedTxId, varint).
              bitField0_ |= 0x00000008;
              lastCommittedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was parsed, even on error, so the exception can
        // carry a usable partial message.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection descriptor for this message type.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
    }

    // Field-accessor table used by GeneratedMessage's reflection support.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
    }

    // Stream parser; delegates to the private parsing constructor above.
    public static com.google.protobuf.Parser<PrepareRecoveryResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<PrepareRecoveryResponseProto>() {
      public PrepareRecoveryResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PrepareRecoveryResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PrepareRecoveryResponseProto> getParserForType() {
      return PARSER;
    }
15005    
    // Presence bits for the four fields below (0x1, 0x2, 0x4, 0x8).
    private int bitField0_;
    // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }

    // optional uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }

    // required uint64 lastWriterEpoch = 3;
    public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
    private long lastWriterEpoch_;
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    public boolean hasLastWriterEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    public long getLastWriterEpoch() {
      return lastWriterEpoch_;
    }

    // optional uint64 lastCommittedTxId = 4;
    public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
    private long lastCommittedTxId_;
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    public boolean hasLastCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    public long getLastCommittedTxId() {
      return lastCommittedTxId_;
    }

    // Sets every field to its proto default; called before parsing and when
    // building the default instance.
    private void initFields() {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      acceptedInEpoch_ = 0L;
      lastWriterEpoch_ = 0L;
      lastCommittedTxId_ = 0L;
    }
    // Memoized result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // lastWriterEpoch is the only required scalar field.
      if (!hasLastWriterEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      // segmentState is optional, but when present it must itself be
      // fully initialized (it contains required fields).
      if (hasSegmentState()) {
        if (!getSegmentState().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
15113    
    // Serializes set fields (per bitField0_) in field-number order, then any
    // unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // populate memoizedSerializedSize before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, acceptedInEpoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, lastWriterEpoch_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, lastCommittedTxId_);
      }
      getUnknownFields().writeTo(output);
    }
15131    
    // Cached wire size; -1 until first computed. Safe to cache because the
    // message is immutable after construction.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, acceptedInEpoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, lastWriterEpoch_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(4, lastCommittedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
15158    
    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to GeneratedMessage's writeReplace.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
15165    
    // Field-by-field equality: presence flags must match, and values are
    // compared only for fields that are present; unknown fields also compared.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;

      boolean result = true;
      result = result && (hasSegmentState() == other.hasSegmentState());
      if (hasSegmentState()) {
        result = result && getSegmentState()
            .equals(other.getSegmentState());
      }
      result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
      if (hasAcceptedInEpoch()) {
        result = result && (getAcceptedInEpoch()
            == other.getAcceptedInEpoch());
      }
      result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
      if (hasLastWriterEpoch()) {
        result = result && (getLastWriterEpoch()
            == other.getLastWriterEpoch());
      }
      result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
      if (hasLastCommittedTxId()) {
        result = result && (getLastCommittedTxId()
            == other.getLastCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
15201    
    // Cached hash; 0 means not yet computed. Mixes only fields that are
    // present, keeping the contract consistent with equals() above.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSegmentState()) {
        hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
        hash = (53 * hash) + getSegmentState().hashCode();
      }
      if (hasAcceptedInEpoch()) {
        hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAcceptedInEpoch());
      }
      if (hasLastWriterEpoch()) {
        hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastWriterEpoch());
      }
      if (hasLastCommittedTxId()) {
        hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
15230    
    // Standard parseFrom/parseDelimitedFrom overloads; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
15283    
    // Builder factories: fresh builder, builder seeded from a prototype, and
    // a builder pre-populated from this instance.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Nested-builder factory used internally by GeneratedMessage.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
15297        /**
15298         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
15299         */
15300        public static final class Builder extends
15301            com.google.protobuf.GeneratedMessage.Builder<Builder>
15302           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
      // Reflection descriptor for the message this builder produces.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Pre-creates nested field builders when the runtime requires it
      // (alwaysUseFieldBuilders is set in some GeneratedMessage modes).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSegmentStateFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
15333    
      // Resets every field to its proto default and clears all presence bits.
      public Builder clear() {
        super.clear();
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        acceptedInEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        lastWriterEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        lastCommittedTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      // Deep copy via an intermediate partially-built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
      }
15363    
      // Builds and validates; throws UninitializedMessageException if any
      // required field (lastWriterEpoch, or nested required fields) is unset.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without validation, copying builder state and translating
      // builder presence bits into the message's bitField0_.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (segmentStateBuilder_ == null) {
          result.segmentState_ = segmentState_;
        } else {
          result.segmentState_ = segmentStateBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.acceptedInEpoch_ = acceptedInEpoch_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.lastWriterEpoch_ = lastWriterEpoch_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.lastCommittedTxId_ = lastCommittedTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
15400    
      // Generic merge entry point; dispatches to the typed overload when
      // possible, otherwise falls back to reflection-based merging.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Copies only the fields that are present on `other`; merging the
      // default instance is a no-op.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
        if (other.hasSegmentState()) {
          mergeSegmentState(other.getSegmentState());
        }
        if (other.hasAcceptedInEpoch()) {
          setAcceptedInEpoch(other.getAcceptedInEpoch());
        }
        if (other.hasLastWriterEpoch()) {
          setLastWriterEpoch(other.getLastWriterEpoch());
        }
        if (other.hasLastCommittedTxId()) {
          setLastCommittedTxId(other.getLastCommittedTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
15427    
15428          public final boolean isInitialized() {
15429            if (!hasLastWriterEpoch()) {
15430              
15431              return false;
15432            }
15433            if (hasSegmentState()) {
15434              if (!getSegmentState().isInitialized()) {
15435                
15436                return false;
15437              }
15438            }
15439            return true;
15440          }
15441    
      /**
       * Reads a serialized message from {@code input} and merges it into this
       * builder.
       *
       * @throws java.io.IOException if the stream is malformed; note that
       *         whatever was successfully parsed before the failure is still
       *         merged in by the finally block below.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Recover the partially-parsed message so the finally block can
          // still merge it before the exception propagates.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits for this builder's fields:
      // 0x1 = segmentState, 0x2 = acceptedInEpoch,
      // 0x4 = lastWriterEpoch, 0x8 = lastCommittedTxId.
      private int bitField0_;

      // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      // Lazily created nested builder; once it exists it owns the field value
      // and segmentState_ is nulled out (see getSegmentStateFieldBuilder()).
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public boolean hasSegmentState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
        // The field value lives either in segmentState_ or, once a nested
        // builder has been created, inside segmentStateBuilder_.
        if (segmentStateBuilder_ == null) {
          return segmentState_;
        } else {
          return segmentStateBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          segmentState_ = value;
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (segmentStateBuilder_ == null) {
          segmentState_ = builderForValue.build();
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          // If a non-default value is already set, merge the two messages;
          // otherwise adopt the new value wholesale.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            segmentState_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
          } else {
            segmentState_ = value;
          }
          onChanged();
        } else {
          segmentStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder clearSegmentState() {
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
        // Handing out a mutable builder implies the field is now "set".
        bitField0_ |= 0x00000001;
        onChanged();
        return getSegmentStateFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
        if (segmentStateBuilder_ != null) {
          return segmentStateBuilder_.getMessageOrBuilder();
        } else {
          return segmentState_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getSegmentStateFieldBuilder() {
        // Creates the nested builder on first use, seeded with the current
        // value; ownership of the value transfers to the builder, so the
        // plain field is nulled out afterwards.
        if (segmentStateBuilder_ == null) {
          segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  segmentState_,
                  getParentForChildren(),
                  isClean());
          segmentState_ = null;
        }
        return segmentStateBuilder_;
      }
15577    
      // optional uint64 acceptedInEpoch = 2; (presence bit 0x2)
      private long acceptedInEpoch_ ;
      /**
       * <code>optional uint64 acceptedInEpoch = 2;</code>
       */
      public boolean hasAcceptedInEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint64 acceptedInEpoch = 2;</code>
       */
      public long getAcceptedInEpoch() {
        return acceptedInEpoch_;
      }
      /**
       * <code>optional uint64 acceptedInEpoch = 2;</code>
       */
      public Builder setAcceptedInEpoch(long value) {
        bitField0_ |= 0x00000002;
        acceptedInEpoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 acceptedInEpoch = 2;</code>
       */
      public Builder clearAcceptedInEpoch() {
        // Clear the presence bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000002);
        acceptedInEpoch_ = 0L;
        onChanged();
        return this;
      }
15610    
      // required uint64 lastWriterEpoch = 3; (presence bit 0x4)
      private long lastWriterEpoch_ ;
      /**
       * <code>required uint64 lastWriterEpoch = 3;</code>
       */
      public boolean hasLastWriterEpoch() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint64 lastWriterEpoch = 3;</code>
       */
      public long getLastWriterEpoch() {
        return lastWriterEpoch_;
      }
      /**
       * <code>required uint64 lastWriterEpoch = 3;</code>
       */
      public Builder setLastWriterEpoch(long value) {
        bitField0_ |= 0x00000004;
        lastWriterEpoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 lastWriterEpoch = 3;</code>
       */
      public Builder clearLastWriterEpoch() {
        // Clearing a required field leaves the builder uninitialized until
        // it is set again (see isInitialized()).
        bitField0_ = (bitField0_ & ~0x00000004);
        lastWriterEpoch_ = 0L;
        onChanged();
        return this;
      }
15643    
      // optional uint64 lastCommittedTxId = 4; (presence bit 0x8)
      private long lastCommittedTxId_ ;
      /**
       * <code>optional uint64 lastCommittedTxId = 4;</code>
       *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
       */
      public boolean hasLastCommittedTxId() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional uint64 lastCommittedTxId = 4;</code>
       *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
       */
      public long getLastCommittedTxId() {
        return lastCommittedTxId_;
      }
      /**
       * <code>optional uint64 lastCommittedTxId = 4;</code>
       *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
       */
      public Builder setLastCommittedTxId(long value) {
        bitField0_ |= 0x00000008;
        lastCommittedTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastCommittedTxId = 4;</code>
       *
       * <pre>
       * The highest committed txid that this logger has ever seen.
       * This may be higher than the data it actually has, in the case
       * that it was lagging before the old writer crashed.
       * </pre>
       */
      public Builder clearLastCommittedTxId() {
        // Clear the presence bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000008);
        lastCommittedTxId_ = 0L;
        onChanged();
        return this;
      }
15700    
15701          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15702        }
15703    
    // Eagerly create the shared default (empty) instance at class-load time.
    static {
      defaultInstance = new PrepareRecoveryResponseProto(true);
      defaultInstance.initFields();
    }
15708    
15709        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15710      }
15711    
  /**
   * Read-side accessor contract shared by {@code AcceptRecoveryRequestProto}
   * and its Builder: presence checks and getters for reqInfo, stateToAccept
   * and fromURL.
   */
  public interface AcceptRecoveryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    boolean hasStateToAccept();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();

    // required string fromURL = 3;
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    boolean hasFromURL();
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    java.lang.String getFromURL();
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    com.google.protobuf.ByteString
        getFromURLBytes();
  }
15782      /**
15783       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
15784       *
15785       * <pre>
15786       **
15787       * acceptRecovery()
15788       * </pre>
15789       */
15790      public static final class AcceptRecoveryRequestProto extends
15791          com.google.protobuf.GeneratedMessage
15792          implements AcceptRecoveryRequestProtoOrBuilder {
    // Use AcceptRecoveryRequestProto.newBuilder() to construct.
    private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Internal ctor for the shared default instance; bypasses builder state
    // and installs an empty unknown-field set.
    private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15799    
    // Shared immutable default instance (assigned in the class's static
    // initializer, which lies outside this excerpt).
    private static final AcceptRecoveryRequestProto defaultInstance;
    public static AcceptRecoveryRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public AcceptRecoveryRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
15808    
    // Fields encountered during parsing that this schema does not declare;
    // preserved so re-serialization does not lose them.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Parsing constructor: decodes a serialized AcceptRecoveryRequestProto
     * from {@code input}. Unrecognized fields are preserved in
     * {@code unknownFields} rather than dropped.
     *
     * @throws com.google.protobuf.InvalidProtocolBufferException on malformed
     *         input; the partially-built message is attached via
     *         setUnfinishedMessage() so callers can inspect it.
     */
    private AcceptRecoveryRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // NOTE(review): declared by the generator but not used for this
      // message's fields in the visible code.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          // Wire-format tag = (field number << 3) | wire type.
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of stream / enclosing group.
              done = true;
              break;
            default: {
              // Unknown tag: stash it; stop if it cannot be skipped.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (reqInfo), wire type 2. A repeated occurrence is
              // merged into the previously parsed value per proto semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (stateToAccept), wire type 2; same merge-on-repeat
              // handling as above.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = stateToAccept_.toBuilder();
              }
              stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(stateToAccept_);
                stateToAccept_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 26: {
              // Field 3 (fromURL), wire type 2: kept as raw bytes; decoded
              // to String lazily in getFromURL().
              bitField0_ |= 0x00000004;
              fromURL_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always seal the unknown-field set, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Descriptor for {@code hadoop.hdfs.AcceptRecoveryRequestProto}. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
    }

    // Maps descriptor fields onto this generated class for reflective access.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
    }
15892    
    /** Stream parser used by the parseFrom() overloads and by the runtime. */
    public static com.google.protobuf.Parser<AcceptRecoveryRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<AcceptRecoveryRequestProto>() {
      public AcceptRecoveryRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        // Delegates to the parsing constructor above.
        return new AcceptRecoveryRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<AcceptRecoveryRequestProto> getParserForType() {
      return PARSER;
    }
15907    
    // Presence bits: 0x1 = reqInfo, 0x2 = stateToAccept, 0x4 = fromURL.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
    public static final int STATETOACCEPT_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    public boolean hasStateToAccept() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
      return stateToAccept_;
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover 
     * </pre>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
      return stateToAccept_;
    }

    // required string fromURL = 3;
    public static final int FROMURL_FIELD_NUMBER = 3;
    // Holds either a decoded java.lang.String or the raw ByteString as
    // parsed from the wire; conversion is lazy in both directions.
    private java.lang.Object fromURL_;
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    public boolean hasFromURL() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    public java.lang.String getFromURL() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String only when the bytes are valid UTF-8, so
        // round-tripping invalid bytes stays lossless.
        if (bs.isValidUtf8()) {
          fromURL_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied 
     * </pre>
     */
    public com.google.protobuf.ByteString
        getFromURLBytes() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof java.lang.String) {
        // Encode once and cache the ByteString form for later serialization.
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        fromURL_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
16019    
    /** Installs proto defaults: empty sub-messages and the empty string. */
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      fromURL_ = "";
    }
    // Memoized result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    /**
     * True when all three required fields are present and both sub-messages
     * are themselves initialized. The answer is cached after first call.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStateToAccept()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFromURL()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStateToAccept().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
16053    
    /**
     * Serializes the set fields (in field-number order) followed by any
     * unknown fields to {@code output}.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Ensure the serialized size is computed/memoized before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBytes(3, getFromURLBytes());
      }
      getUnknownFields().writeTo(output);
    }
16068    
    // Cached wire size; -1 means "not yet computed".
    private int memoizedSerializedSize = -1;
    /** Returns (and memoizes) the exact serialized size in bytes. */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(3, getFromURLBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
16091    
    private static final long serialVersionUID = 0L;
    // Java serialization hook; defers to GeneratedMessage's replacement form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
16098    
    /**
     * Value equality: same presence pattern, equal field values, and equal
     * unknown-field sets.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStateToAccept() == other.hasStateToAccept());
      if (hasStateToAccept()) {
        result = result && getStateToAccept()
            .equals(other.getStateToAccept());
      }
      result = result && (hasFromURL() == other.hasFromURL());
      if (hasFromURL()) {
        result = result && getFromURL()
            .equals(other.getFromURL());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
16129    
    // Cached hash; 0 doubles as the "not yet computed" sentinel.
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with equals(): mixes the descriptor, each present
     * field (keyed by its field number) and the unknown fields.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStateToAccept()) {
        hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
        hash = (53 * hash) + getStateToAccept().hashCode();
      }
      if (hasFromURL()) {
        hash = (37 * hash) + FROMURL_FIELD_NUMBER;
        hash = (53 * hash) + getFromURL().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
16154    
    // Static parsing entry points. Every overload delegates to the shared
    // PARSER instance; the ExtensionRegistryLite variants resolve extensions
    // during parsing, and parseDelimitedFrom expects a varint length prefix
    // before the message bytes.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
16207    
    // Builder factories: a fresh builder, a builder pre-populated from an
    // existing message, and the parent-aware variant used internally by the
    // runtime for nested-builder change propagation.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
16221        /**
16222         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
16223         *
16224         * <pre>
16225         **
16226         * acceptRecovery()
16227         * </pre>
16228         */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
      // Descriptor shared with the message class; both resolve against the
      // file-level descriptor table of the enclosing generated class.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly instantiates the nested-message field builders when the
      // runtime flag alwaysUseFieldBuilders is set.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
          getStateToAcceptFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets every field to its default and clears the corresponding
      // has-bits in bitField0_.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          stateToAcceptBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        fromURL_ = "";
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
      }

      // Like buildPartial(), but throws if any required field is unset.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Copies builder state into a new message, translating this builder's
      // has-bits into the message's bitField0_ bit for bit.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (stateToAcceptBuilder_ == null) {
          result.stateToAccept_ = stateToAccept_;
        } else {
          result.stateToAccept_ = stateToAcceptBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.fromURL_ = fromURL_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-by-field merge: nested messages are merged recursively, the
      // string field is overwritten, unknown fields are concatenated.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasStateToAccept()) {
          mergeStateToAccept(other.getStateToAccept());
        }
        if (other.hasFromURL()) {
          bitField0_ |= 0x00000004;
          fromURL_ = other.fromURL_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // All three fields are declared 'required', and the two nested
      // messages must themselves be fully initialized.
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          
          return false;
        }
        if (!hasStateToAccept()) {
          
          return false;
        }
        if (!hasFromURL()) {
          
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          
          return false;
        }
        if (!getStateToAccept().isInitialized()) {
          
          return false;
        }
        return true;
      }

      // Stream parse; on failure the partially-parsed message (attached to
      // the exception) is still merged in via the finally block before
      // rethrowing.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Has-bits: 0x1 = reqInfo, 0x2 = stateToAccept, 0x4 = fromURL.
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // reqInfo_ holds the value only while reqInfoBuilder_ is null; once the
      // lazy field builder exists it becomes the single source of truth.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          // Merge only when a non-default value is already present;
          // otherwise the incoming message simply replaces the field.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        // Lazily create the field builder; ownership of the current value
        // transfers into it and reqInfo_ is nulled out.
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }

      // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
      // Same lazy field-builder pattern as reqInfo above.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public boolean hasStateToAccept() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
        if (stateToAcceptBuilder_ == null) {
          return stateToAccept_;
        } else {
          return stateToAcceptBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (stateToAcceptBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          stateToAccept_ = value;
          onChanged();
        } else {
          stateToAcceptBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public Builder setStateToAccept(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = builderForValue.build();
          onChanged();
        } else {
          stateToAcceptBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (stateToAcceptBuilder_ == null) {
          // Merge only when a non-default value is already present;
          // otherwise the incoming message simply replaces the field.
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            stateToAccept_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
          } else {
            stateToAccept_ = value;
          }
          onChanged();
        } else {
          stateToAcceptBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public Builder clearStateToAccept() {
        if (stateToAcceptBuilder_ == null) {
          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          stateToAcceptBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getStateToAcceptFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
        if (stateToAcceptBuilder_ != null) {
          return stateToAcceptBuilder_.getMessageOrBuilder();
        } else {
          return stateToAccept_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
       *
       * <pre>
       ** Details on the segment to recover 
       * </pre>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
          getStateToAcceptFieldBuilder() {
        if (stateToAcceptBuilder_ == null) {
          stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  stateToAccept_,
                  getParentForChildren(),
                  isClean());
          stateToAccept_ = null;
        }
        return stateToAcceptBuilder_;
      }

      // required string fromURL = 3;
      // Stored as Object: either a String or a ByteString, converted lazily
      // in each direction and cached back into the field.
      private java.lang.Object fromURL_ = "";
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public boolean hasFromURL() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public java.lang.String getFromURL() {
        java.lang.Object ref = fromURL_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          fromURL_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public com.google.protobuf.ByteString
          getFromURLBytes() {
        java.lang.Object ref = fromURL_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b = 
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          fromURL_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public Builder setFromURL(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public Builder clearFromURL() {
        bitField0_ = (bitField0_ & ~0x00000004);
        fromURL_ = getDefaultInstance().getFromURL();
        onChanged();
        return this;
      }
      /**
       * <code>required string fromURL = 3;</code>
       *
       * <pre>
       ** The URL from which the log may be copied 
       * </pre>
       */
      public Builder setFromURLBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
    }
16772    
    // Eagerly build the singleton returned by getDefaultInstance(); the
    // no-init constructor path avoids recursion through nested defaults.
    static {
      defaultInstance = new AcceptRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
16777    
16778        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
16779      }
16780    
  // Accessor interface for AcceptRecoveryResponseProto; empty because the
  // message declares no fields.
  public interface AcceptRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
16784      /**
16785       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
16786       */
16787      public static final class AcceptRecoveryResponseProto extends
16788          com.google.protobuf.GeneratedMessage
16789          implements AcceptRecoveryResponseProtoOrBuilder {
16790        // Use AcceptRecoveryResponseProto.newBuilder() to construct.
16791        private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16792          super(builder);
16793          this.unknownFields = builder.getUnknownFields();
16794        }
16795        private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16796    
16797        private static final AcceptRecoveryResponseProto defaultInstance;
16798        public static AcceptRecoveryResponseProto getDefaultInstance() {
16799          return defaultInstance;
16800        }
16801    
16802        public AcceptRecoveryResponseProto getDefaultInstanceForType() {
16803          return defaultInstance;
16804        }
16805    
16806        private final com.google.protobuf.UnknownFieldSet unknownFields;
16807        @java.lang.Override
16808        public final com.google.protobuf.UnknownFieldSet
16809            getUnknownFields() {
16810          return this.unknownFields;
16811        }
16812        private AcceptRecoveryResponseProto(
16813            com.google.protobuf.CodedInputStream input,
16814            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16815            throws com.google.protobuf.InvalidProtocolBufferException {
16816          initFields();
16817          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16818              com.google.protobuf.UnknownFieldSet.newBuilder();
16819          try {
16820            boolean done = false;
16821            while (!done) {
16822              int tag = input.readTag();
16823              switch (tag) {
16824                case 0:
16825                  done = true;
16826                  break;
16827                default: {
16828                  if (!parseUnknownField(input, unknownFields,
16829                                         extensionRegistry, tag)) {
16830                    done = true;
16831                  }
16832                  break;
16833                }
16834              }
16835            }
16836          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16837            throw e.setUnfinishedMessage(this);
16838          } catch (java.io.IOException e) {
16839            throw new com.google.protobuf.InvalidProtocolBufferException(
16840                e.getMessage()).setUnfinishedMessage(this);
16841          } finally {
16842            this.unknownFields = unknownFields.build();
16843            makeExtensionsImmutable();
16844          }
16845        }
16846        public static final com.google.protobuf.Descriptors.Descriptor
16847            getDescriptor() {
16848          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
16849        }
16850    
16851        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16852            internalGetFieldAccessorTable() {
16853          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
16854              .ensureFieldAccessorsInitialized(
16855                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
16856        }
16857    
16858        public static com.google.protobuf.Parser<AcceptRecoveryResponseProto> PARSER =
16859            new com.google.protobuf.AbstractParser<AcceptRecoveryResponseProto>() {
16860          public AcceptRecoveryResponseProto parsePartialFrom(
16861              com.google.protobuf.CodedInputStream input,
16862              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16863              throws com.google.protobuf.InvalidProtocolBufferException {
16864            return new AcceptRecoveryResponseProto(input, extensionRegistry);
16865          }
16866        };
16867    
16868        @java.lang.Override
16869        public com.google.protobuf.Parser<AcceptRecoveryResponseProto> getParserForType() {
16870          return PARSER;
16871        }
16872    
16873        private void initFields() {
16874        }
16875        private byte memoizedIsInitialized = -1;
16876        public final boolean isInitialized() {
16877          byte isInitialized = memoizedIsInitialized;
16878          if (isInitialized != -1) return isInitialized == 1;
16879    
16880          memoizedIsInitialized = 1;
16881          return true;
16882        }
16883    
16884        public void writeTo(com.google.protobuf.CodedOutputStream output)
16885                            throws java.io.IOException {
16886          getSerializedSize();
16887          getUnknownFields().writeTo(output);
16888        }
16889    
16890        private int memoizedSerializedSize = -1;
16891        public int getSerializedSize() {
16892          int size = memoizedSerializedSize;
16893          if (size != -1) return size;
16894    
16895          size = 0;
16896          size += getUnknownFields().getSerializedSize();
16897          memoizedSerializedSize = size;
16898          return size;
16899        }
16900    
16901        private static final long serialVersionUID = 0L;
16902        @java.lang.Override
16903        protected java.lang.Object writeReplace()
16904            throws java.io.ObjectStreamException {
16905          return super.writeReplace();
16906        }
16907    
16908        @java.lang.Override
16909        public boolean equals(final java.lang.Object obj) {
16910          if (obj == this) {
16911           return true;
16912          }
16913          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
16914            return super.equals(obj);
16915          }
16916          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
16917    
16918          boolean result = true;
16919          result = result &&
16920              getUnknownFields().equals(other.getUnknownFields());
16921          return result;
16922        }
16923    
16924        private int memoizedHashCode = 0;
16925        @java.lang.Override
16926        public int hashCode() {
16927          if (memoizedHashCode != 0) {
16928            return memoizedHashCode;
16929          }
16930          int hash = 41;
16931          hash = (19 * hash) + getDescriptorForType().hashCode();
16932          hash = (29 * hash) + getUnknownFields().hashCode();
16933          memoizedHashCode = hash;
16934          return hash;
16935        }
16936    
16937        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16938            com.google.protobuf.ByteString data)
16939            throws com.google.protobuf.InvalidProtocolBufferException {
16940          return PARSER.parseFrom(data);
16941        }
16942        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16943            com.google.protobuf.ByteString data,
16944            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16945            throws com.google.protobuf.InvalidProtocolBufferException {
16946          return PARSER.parseFrom(data, extensionRegistry);
16947        }
16948        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
16949            throws com.google.protobuf.InvalidProtocolBufferException {
16950          return PARSER.parseFrom(data);
16951        }
16952        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16953            byte[] data,
16954            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16955            throws com.google.protobuf.InvalidProtocolBufferException {
16956          return PARSER.parseFrom(data, extensionRegistry);
16957        }
16958        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
16959            throws java.io.IOException {
16960          return PARSER.parseFrom(input);
16961        }
16962        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16963            java.io.InputStream input,
16964            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16965            throws java.io.IOException {
16966          return PARSER.parseFrom(input, extensionRegistry);
16967        }
16968        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
16969            throws java.io.IOException {
16970          return PARSER.parseDelimitedFrom(input);
16971        }
16972        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
16973            java.io.InputStream input,
16974            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16975            throws java.io.IOException {
16976          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16977        }
16978        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16979            com.google.protobuf.CodedInputStream input)
16980            throws java.io.IOException {
16981          return PARSER.parseFrom(input);
16982        }
16983        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16984            com.google.protobuf.CodedInputStream input,
16985            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16986            throws java.io.IOException {
16987          return PARSER.parseFrom(input, extensionRegistry);
16988        }
16989    
16990        public static Builder newBuilder() { return Builder.create(); }
16991        public Builder newBuilderForType() { return newBuilder(); }
16992        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
16993          return newBuilder().mergeFrom(prototype);
16994        }
16995        public Builder toBuilder() { return newBuilder(this); }
16996    
16997        @java.lang.Override
16998        protected Builder newBuilderForType(
16999            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17000          Builder builder = new Builder(parent);
17001          return builder;
17002        }
17003        /**
17004         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
17005         */
17006        public static final class Builder extends
17007            com.google.protobuf.GeneratedMessage.Builder<Builder>
17008           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
17009          public static final com.google.protobuf.Descriptors.Descriptor
17010              getDescriptor() {
17011            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17012          }
17013    
17014          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17015              internalGetFieldAccessorTable() {
17016            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
17017                .ensureFieldAccessorsInitialized(
17018                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
17019          }
17020    
17021          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
17022          private Builder() {
17023            maybeForceBuilderInitialization();
17024          }
17025    
17026          private Builder(
17027              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17028            super(parent);
17029            maybeForceBuilderInitialization();
17030          }
17031          private void maybeForceBuilderInitialization() {
17032            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17033            }
17034          }
17035          private static Builder create() {
17036            return new Builder();
17037          }
17038    
17039          public Builder clear() {
17040            super.clear();
17041            return this;
17042          }
17043    
17044          public Builder clone() {
17045            return create().mergeFrom(buildPartial());
17046          }
17047    
17048          public com.google.protobuf.Descriptors.Descriptor
17049              getDescriptorForType() {
17050            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17051          }
17052    
17053          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
17054            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17055          }
17056    
17057          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
17058            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
17059            if (!result.isInitialized()) {
17060              throw newUninitializedMessageException(result);
17061            }
17062            return result;
17063          }
17064    
17065          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
17066            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
17067            onBuilt();
17068            return result;
17069          }
17070    
17071          public Builder mergeFrom(com.google.protobuf.Message other) {
17072            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
17073              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
17074            } else {
17075              super.mergeFrom(other);
17076              return this;
17077            }
17078          }
17079    
17080          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
17081            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
17082            this.mergeUnknownFields(other.getUnknownFields());
17083            return this;
17084          }
17085    
17086          public final boolean isInitialized() {
17087            return true;
17088          }
17089    
17090          public Builder mergeFrom(
17091              com.google.protobuf.CodedInputStream input,
17092              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17093              throws java.io.IOException {
17094            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null;
17095            try {
17096              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17097            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17098              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage();
17099              throw e;
17100            } finally {
17101              if (parsedMessage != null) {
17102                mergeFrom(parsedMessage);
17103              }
17104            }
17105            return this;
17106          }
17107    
17108          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17109        }
17110    
17111        static {
17112          defaultInstance = new AcceptRecoveryResponseProto(true);
17113          defaultInstance.initFields();
17114        }
17115    
17116        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17117      }
17118    
17119      /**
17120       * Protobuf service {@code hadoop.hdfs.QJournalProtocolService}
17121       *
17122       * <pre>
17123       **
17124       * Protocol used to journal edits to a JournalNode.
17125       * See the request and response for details of rpc call.
17126       * </pre>
17127       */
17128      public static abstract class QJournalProtocolService
17129          implements com.google.protobuf.Service {
17130        protected QJournalProtocolService() {}
17131    
    /**
     * Asynchronous service interface: one callback-style method per rpc
     * declared for {@code QJournalProtocolService} in
     * QJournalProtocol.proto. Implementations report completion through
     * the supplied {@code done} callback.
     */
    public interface Interface {
      /**
       * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
       */
      public abstract void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);

      /**
       * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
       */
      public abstract void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);

      /**
       * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
       */
      public abstract void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);

      /**
       * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
       */
      public abstract void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);

      /**
       * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
       */
      public abstract void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);

      /**
       * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
       */
      public abstract void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);

      /**
       * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
       */
      public abstract void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);

      /**
       * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
       */
      public abstract void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);

      /**
       * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
       */
      public abstract void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);

      /**
       * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
       */
      public abstract void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);

      /**
       * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
       */
      public abstract void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);

      /**
       * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
       */
      public abstract void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);

    }
17230    
    // Adapts a callback-style Interface implementation to the protobuf
    // Service API: returns an anonymous QJournalProtocolService subclass
    // whose every rpc method simply delegates to the wrapped impl.
    public static com.google.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new QJournalProtocolService() {
        @java.lang.Override
        public  void isFormatted(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
          impl.isFormatted(controller, request, done);
        }

        @java.lang.Override
        public  void getJournalState(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
          impl.getJournalState(controller, request, done);
        }

        @java.lang.Override
        public  void newEpoch(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
          impl.newEpoch(controller, request, done);
        }

        @java.lang.Override
        public  void format(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
          impl.format(controller, request, done);
        }

        @java.lang.Override
        public  void journal(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
          impl.journal(controller, request, done);
        }

        @java.lang.Override
        public  void heartbeat(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
          impl.heartbeat(controller, request, done);
        }

        @java.lang.Override
        public  void startLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
          impl.startLogSegment(controller, request, done);
        }

        @java.lang.Override
        public  void finalizeLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
          impl.finalizeLogSegment(controller, request, done);
        }

        @java.lang.Override
        public  void purgeLogs(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
          impl.purgeLogs(controller, request, done);
        }

        @java.lang.Override
        public  void getEditLogManifest(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
          impl.getEditLogManifest(controller, request, done);
        }

        @java.lang.Override
        public  void prepareRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
          impl.prepareRecovery(controller, request, done);
        }

        @java.lang.Override
        public  void acceptRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
          impl.acceptRecovery(controller, request, done);
        }

      };
    }
17332    
17333        public static com.google.protobuf.BlockingService
17334            newReflectiveBlockingService(final BlockingInterface impl) {
17335          return new com.google.protobuf.BlockingService() {
17336            public final com.google.protobuf.Descriptors.ServiceDescriptor
17337                getDescriptorForType() {
17338              return getDescriptor();
17339            }
17340    
17341            public final com.google.protobuf.Message callBlockingMethod(
17342                com.google.protobuf.Descriptors.MethodDescriptor method,
17343                com.google.protobuf.RpcController controller,
17344                com.google.protobuf.Message request)
17345                throws com.google.protobuf.ServiceException {
17346              if (method.getService() != getDescriptor()) {
17347                throw new java.lang.IllegalArgumentException(
17348                  "Service.callBlockingMethod() given method descriptor for " +
17349                  "wrong service type.");
17350              }
17351              switch(method.getIndex()) {
17352                case 0:
17353                  return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
17354                case 1:
17355                  return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
17356                case 2:
17357                  return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
17358                case 3:
17359                  return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
17360                case 4:
17361                  return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
17362                case 5:
17363                  return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
17364                case 6:
17365                  return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
17366                case 7:
17367                  return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
17368                case 8:
17369                  return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
17370                case 9:
17371                  return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
17372                case 10:
17373                  return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
17374                case 11:
17375                  return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
17376                default:
17377                  throw new java.lang.AssertionError("Can't get here.");
17378              }
17379            }
17380    
17381            public final com.google.protobuf.Message
17382                getRequestPrototype(
17383                com.google.protobuf.Descriptors.MethodDescriptor method) {
17384              if (method.getService() != getDescriptor()) {
17385                throw new java.lang.IllegalArgumentException(
17386                  "Service.getRequestPrototype() given method " +
17387                  "descriptor for wrong service type.");
17388              }
17389              switch(method.getIndex()) {
17390                case 0:
17391                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
17392                case 1:
17393                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17394                case 2:
17395                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
17396                case 3:
17397                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
17398                case 4:
17399                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
17400                case 5:
17401                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
17402                case 6:
17403                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
17404                case 7:
17405                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
17406                case 8:
17407                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
17408                case 9:
17409                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
17410                case 10:
17411                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
17412                case 11:
17413                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
17414                default:
17415                  throw new java.lang.AssertionError("Can't get here.");
17416              }
17417            }
17418    
17419            public final com.google.protobuf.Message
17420                getResponsePrototype(
17421                com.google.protobuf.Descriptors.MethodDescriptor method) {
17422              if (method.getService() != getDescriptor()) {
17423                throw new java.lang.IllegalArgumentException(
17424                  "Service.getResponsePrototype() given method " +
17425                  "descriptor for wrong service type.");
17426              }
17427              switch(method.getIndex()) {
17428                case 0:
17429                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
17430                case 1:
17431                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17432                case 2:
17433                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
17434                case 3:
17435                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
17436                case 4:
17437                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
17438                case 5:
17439                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
17440                case 6:
17441                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
17442                case 7:
17443                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
17444                case 8:
17445                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
17446                case 9:
17447                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
17448                case 10:
17449                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
17450                case 11:
17451                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17452                default:
17453                  throw new java.lang.AssertionError("Can't get here.");
17454              }
17455            }
17456    
17457          };
17458        }
17459    
17460        /**
17461         * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17462         */
17463        public abstract void isFormatted(
17464            com.google.protobuf.RpcController controller,
17465            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17466            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17467    
17468        /**
17469         * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17470         */
17471        public abstract void getJournalState(
17472            com.google.protobuf.RpcController controller,
17473            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17474            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17475    
17476        /**
17477         * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17478         */
17479        public abstract void newEpoch(
17480            com.google.protobuf.RpcController controller,
17481            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17482            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17483    
17484        /**
17485         * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17486         */
17487        public abstract void format(
17488            com.google.protobuf.RpcController controller,
17489            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17490            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17491    
17492        /**
17493         * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17494         */
17495        public abstract void journal(
17496            com.google.protobuf.RpcController controller,
17497            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17498            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17499    
17500        /**
17501         * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17502         */
17503        public abstract void heartbeat(
17504            com.google.protobuf.RpcController controller,
17505            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17506            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17507    
17508        /**
17509         * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17510         */
17511        public abstract void startLogSegment(
17512            com.google.protobuf.RpcController controller,
17513            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17514            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17515    
17516        /**
17517         * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17518         */
17519        public abstract void finalizeLogSegment(
17520            com.google.protobuf.RpcController controller,
17521            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17522            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17523    
17524        /**
17525         * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17526         */
17527        public abstract void purgeLogs(
17528            com.google.protobuf.RpcController controller,
17529            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17530            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17531    
17532        /**
17533         * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17534         */
17535        public abstract void getEditLogManifest(
17536            com.google.protobuf.RpcController controller,
17537            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17538            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17539    
17540        /**
17541         * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17542         */
17543        public abstract void prepareRecovery(
17544            com.google.protobuf.RpcController controller,
17545            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17546            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17547    
17548        /**
17549         * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17550         */
17551        public abstract void acceptRecovery(
17552            com.google.protobuf.RpcController controller,
17553            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17554            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17555    
17556        public static final
17557            com.google.protobuf.Descriptors.ServiceDescriptor
17558            getDescriptor() {
17559          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
17560        }
17561        public final com.google.protobuf.Descriptors.ServiceDescriptor
17562            getDescriptorForType() {
17563          return getDescriptor();
17564        }
17565    
17566        public final void callMethod(
17567            com.google.protobuf.Descriptors.MethodDescriptor method,
17568            com.google.protobuf.RpcController controller,
17569            com.google.protobuf.Message request,
17570            com.google.protobuf.RpcCallback<
17571              com.google.protobuf.Message> done) {
17572          if (method.getService() != getDescriptor()) {
17573            throw new java.lang.IllegalArgumentException(
17574              "Service.callMethod() given method descriptor for wrong " +
17575              "service type.");
17576          }
17577          switch(method.getIndex()) {
17578            case 0:
17579              this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
17580                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
17581                  done));
17582              return;
17583            case 1:
17584              this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
17585                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
17586                  done));
17587              return;
17588            case 2:
17589              this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
17590                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
17591                  done));
17592              return;
17593            case 3:
17594              this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
17595                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
17596                  done));
17597              return;
17598            case 4:
17599              this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
17600                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
17601                  done));
17602              return;
17603            case 5:
17604              this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
17605                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
17606                  done));
17607              return;
17608            case 6:
17609              this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
17610                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
17611                  done));
17612              return;
17613            case 7:
17614              this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
17615                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
17616                  done));
17617              return;
17618            case 8:
17619              this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
17620                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
17621                  done));
17622              return;
17623            case 9:
17624              this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
17625                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
17626                  done));
17627              return;
17628            case 10:
17629              this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
17630                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
17631                  done));
17632              return;
17633            case 11:
17634              this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
17635                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
17636                  done));
17637              return;
17638            default:
17639              throw new java.lang.AssertionError("Can't get here.");
17640          }
17641        }
17642    
17643        public final com.google.protobuf.Message
17644            getRequestPrototype(
17645            com.google.protobuf.Descriptors.MethodDescriptor method) {
17646          if (method.getService() != getDescriptor()) {
17647            throw new java.lang.IllegalArgumentException(
17648              "Service.getRequestPrototype() given method " +
17649              "descriptor for wrong service type.");
17650          }
17651          switch(method.getIndex()) {
17652            case 0:
17653              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
17654            case 1:
17655              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17656            case 2:
17657              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
17658            case 3:
17659              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
17660            case 4:
17661              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
17662            case 5:
17663              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
17664            case 6:
17665              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
17666            case 7:
17667              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
17668            case 8:
17669              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
17670            case 9:
17671              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
17672            case 10:
17673              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
17674            case 11:
17675              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
17676            default:
17677              throw new java.lang.AssertionError("Can't get here.");
17678          }
17679        }
17680    
17681        public final com.google.protobuf.Message
17682            getResponsePrototype(
17683            com.google.protobuf.Descriptors.MethodDescriptor method) {
17684          if (method.getService() != getDescriptor()) {
17685            throw new java.lang.IllegalArgumentException(
17686              "Service.getResponsePrototype() given method " +
17687              "descriptor for wrong service type.");
17688          }
17689          switch(method.getIndex()) {
17690            case 0:
17691              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
17692            case 1:
17693              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17694            case 2:
17695              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
17696            case 3:
17697              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
17698            case 4:
17699              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
17700            case 5:
17701              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
17702            case 6:
17703              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
17704            case 7:
17705              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
17706            case 8:
17707              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
17708            case 9:
17709              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
17710            case 10:
17711              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
17712            case 11:
17713              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17714            default:
17715              throw new java.lang.AssertionError("Can't get here.");
17716          }
17717        }
17718    
17719        public static Stub newStub(
17720            com.google.protobuf.RpcChannel channel) {
17721          return new Stub(channel);
17722        }
17723    
17724        public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
17725          private Stub(com.google.protobuf.RpcChannel channel) {
17726            this.channel = channel;
17727          }
17728    
17729          private final com.google.protobuf.RpcChannel channel;
17730    
17731          public com.google.protobuf.RpcChannel getChannel() {
17732            return channel;
17733          }
17734    
17735          public  void isFormatted(
17736              com.google.protobuf.RpcController controller,
17737              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17738              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
17739            channel.callMethod(
17740              getDescriptor().getMethods().get(0),
17741              controller,
17742              request,
17743              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
17744              com.google.protobuf.RpcUtil.generalizeCallback(
17745                done,
17746                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
17747                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
17748          }
17749    
17750          public  void getJournalState(
17751              com.google.protobuf.RpcController controller,
17752              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17753              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
17754            channel.callMethod(
17755              getDescriptor().getMethods().get(1),
17756              controller,
17757              request,
17758              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
17759              com.google.protobuf.RpcUtil.generalizeCallback(
17760                done,
17761                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
17762                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
17763          }
17764    
17765          public  void newEpoch(
17766              com.google.protobuf.RpcController controller,
17767              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17768              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
17769            channel.callMethod(
17770              getDescriptor().getMethods().get(2),
17771              controller,
17772              request,
17773              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
17774              com.google.protobuf.RpcUtil.generalizeCallback(
17775                done,
17776                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
17777                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
17778          }
17779    
17780          public  void format(
17781              com.google.protobuf.RpcController controller,
17782              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17783              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
17784            channel.callMethod(
17785              getDescriptor().getMethods().get(3),
17786              controller,
17787              request,
17788              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
17789              com.google.protobuf.RpcUtil.generalizeCallback(
17790                done,
17791                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
17792                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
17793          }
17794    
17795          public  void journal(
17796              com.google.protobuf.RpcController controller,
17797              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17798              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
17799            channel.callMethod(
17800              getDescriptor().getMethods().get(4),
17801              controller,
17802              request,
17803              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
17804              com.google.protobuf.RpcUtil.generalizeCallback(
17805                done,
17806                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
17807                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
17808          }
17809    
      /**
       * Async client stub for the {@code heartbeat} RPC: dispatches the
       * request over this stub's channel using service method descriptor
       * index 5, delivering the {@code HeartbeatResponseProto} to
       * {@code done} when the call completes.
       */
      public  void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
      }
17824    
      /**
       * Async client stub for the {@code startLogSegment} RPC: dispatches the
       * request over this stub's channel using service method descriptor
       * index 6, delivering the {@code StartLogSegmentResponseProto} to
       * {@code done} when the call completes.
       */
      public  void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
      }
17839    
      /**
       * Async client stub for the {@code finalizeLogSegment} RPC: dispatches
       * the request over this stub's channel using service method descriptor
       * index 7, delivering the {@code FinalizeLogSegmentResponseProto} to
       * {@code done} when the call completes.
       */
      public  void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
      }
17854    
      /**
       * Async client stub for the {@code purgeLogs} RPC: dispatches the
       * request over this stub's channel using service method descriptor
       * index 8, delivering the {@code PurgeLogsResponseProto} to
       * {@code done} when the call completes.
       */
      public  void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
      }
17869    
      /**
       * Async client stub for the {@code getEditLogManifest} RPC: dispatches
       * the request over this stub's channel using service method descriptor
       * index 9, delivering the {@code GetEditLogManifestResponseProto} to
       * {@code done} when the call completes.
       */
      public  void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
      }
17884    
      /**
       * Async client stub for the {@code prepareRecovery} RPC: dispatches the
       * request over this stub's channel using service method descriptor
       * index 10, delivering the {@code PrepareRecoveryResponseProto} to
       * {@code done} when the call completes.
       */
      public  void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
      }
17899    
      /**
       * Async client stub for the {@code acceptRecovery} RPC: dispatches the
       * request over this stub's channel using service method descriptor
       * index 11, delivering the {@code AcceptRecoveryResponseProto} to
       * {@code done} when the call completes.
       */
      public  void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
      }
17914        }
17915    
    /**
     * Creates a synchronous client stub that issues each RPC of this service
     * as a blocking call on the given {@code BlockingRpcChannel}.
     */
    public static BlockingInterface newBlockingStub(
        com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }
17920    
    /**
     * Blocking (synchronous) client-side view of the QJournalProtocol
     * service. Each method issues one RPC and returns the typed response
     * message directly, throwing
     * {@link com.google.protobuf.ServiceException} if the underlying call
     * fails. Obtain an instance via {@code newBlockingStub(channel)}.
     */
    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;
    }
17982    
    /**
     * {@link BlockingInterface} implementation that forwards every call to
     * {@code channel.callBlockingMethod(...)} with the matching method
     * descriptor (indices 0-11, in the order the methods are declared in
     * {@code QJournalProtocol.proto}) and casts the returned message to the
     * method's response type.
     */
    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      // Transport all RPCs are issued on; supplied at construction.
      private final com.google.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
      }

    }
18134    
18135        // @@protoc_insertion_point(class_scope:hadoop.hdfs.QJournalProtocolService)
18136      }
18137    
  // Per-message-type reflection state: one Descriptor plus one
  // FieldAccessorTable for each message declared in QJournalProtocol.proto.
  // These are presumably assigned while the file descriptor is built during
  // class initialization (the assignment site is not in this section —
  // verify against the static initializer at the end of the file).
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalIdProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
18278    
  /**
   * Returns the {@code FileDescriptor} for {@code QJournalProtocol.proto},
   * backing the {@code getDescriptor().getMethods().get(i)} lookups used by
   * the service stubs above.
   */
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  // Backing file descriptor; presumably built from descriptorData during
  // class initialization — the assignment site is past this section.
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  // Static initializer: decodes the serialized form of
  // QJournalProtocol.proto into a FileDescriptor, then wires up every
  // internal_static_* descriptor and field-accessor-table field declared
  // above. Generated by protoc -- do not edit by hand.
  static {
    // The compiled .proto file as a serialized FileDescriptorProto,
    // split across string literals (octal escapes encode raw bytes).
    // A single changed byte corrupts the descriptor, so this data must
    // stay exactly as protoc emitted it.
    java.lang.String[] descriptorData = {
      "\n\026QJournalProtocol.proto\022\013hadoop.hdfs\032\nh" +
      "dfs.proto\"$\n\016JournalIdProto\022\022\n\nidentifie" +
      "r\030\001 \002(\t\"\201\001\n\020RequestInfoProto\022.\n\tjournalI" +
      "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\r\n\005" +
      "epoch\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\r" +
      "committedTxId\030\004 \001(\004\"M\n\021SegmentStateProto" +
      "\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014" +
      "isInProgress\030\003 \002(\010\"k\n\032PersistedRecoveryP" +
      "axosData\0224\n\014segmentState\030\001 \002(\0132\036.hadoop." +
      "hdfs.SegmentStateProto\022\027\n\017acceptedInEpoc",
      "h\030\002 \002(\004\"\221\001\n\023JournalRequestProto\022.\n\007reqIn" +
      "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
      "\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007" +
      "records\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024J" +
      "ournalResponseProto\"G\n\025HeartbeatRequestP" +
      "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ" +
      "estInfoProto\"\030\n\026HeartbeatResponseProto\"[" +
      "\n\033StartLogSegmentRequestProto\022.\n\007reqInfo" +
      "\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022\014\n" +
      "\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResponsePr",
      "oto\"t\n\036FinalizeLogSegmentRequestProto\022.\n" +
      "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo" +
      "Proto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(" +
      "\004\"!\n\037FinalizeLogSegmentResponseProto\"^\n\025" +
      "PurgeLogsRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
      ".hadoop.hdfs.RequestInfoProto\022\025\n\rminTxId" +
      "ToKeep\030\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"C" +
      "\n\027IsFormattedRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
      ".hadoop.hdfs.JournalIdProto\"/\n\030IsFormatt" +
      "edResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"G\n\033",
      "GetJournalStateRequestProto\022(\n\003jid\030\001 \002(\013" +
      "2\033.hadoop.hdfs.JournalIdProto\"K\n\034GetJour" +
      "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
      "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\"o\n\022FormatRequ" +
      "estProto\022(\n\003jid\030\001 \002(\0132\033.hadoop.hdfs.Jour" +
      "nalIdProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs" +
      ".NamespaceInfoProto\"\025\n\023FormatResponsePro" +
      "to\"\200\001\n\024NewEpochRequestProto\022(\n\003jid\030\001 \002(\013" +
      "2\033.hadoop.hdfs.JournalIdProto\022/\n\006nsInfo\030" +
      "\002 \002(\0132\037.hadoop.hdfs.NamespaceInfoProto\022\r",
      "\n\005epoch\030\003 \002(\004\"0\n\025NewEpochResponseProto\022\027" +
      "\n\017lastSegmentTxId\030\001 \001(\004\"z\n\036GetEditLogMan" +
      "ifestRequestProto\022(\n\003jid\030\001 \002(\0132\033.hadoop." +
      "hdfs.JournalIdProto\022\021\n\tsinceTxId\030\002 \002(\004\022\033" +
      "\n\014inProgressOk\030\004 \001(\010:\005false\"n\n\037GetEditLo" +
      "gManifestResponseProto\0229\n\010manifest\030\001 \002(\013" +
      "2\'.hadoop.hdfs.RemoteEditLogManifestProt" +
      "o\022\020\n\010httpPort\030\002 \002(\r\"b\n\033PrepareRecoveryRe" +
      "questProto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdf" +
      "s.RequestInfoProto\022\023\n\013segmentTxId\030\002 \002(\004\"",
      "\241\001\n\034PrepareRecoveryResponseProto\0224\n\014segm" +
      "entState\030\001 \001(\0132\036.hadoop.hdfs.SegmentStat" +
      "eProto\022\027\n\017acceptedInEpoch\030\002 \001(\004\022\027\n\017lastW" +
      "riterEpoch\030\003 \002(\004\022\031\n\021lastCommittedTxId\030\004 " +
      "\001(\004\"\224\001\n\032AcceptRecoveryRequestProto\022.\n\007re" +
      "qInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoPro" +
      "to\0225\n\rstateToAccept\030\002 \002(\0132\036.hadoop.hdfs." +
      "SegmentStateProto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033Ac" +
      "ceptRecoveryResponseProto2\220\t\n\027QJournalPr" +
      "otocolService\022Z\n\013isFormatted\022$.hadoop.hd",
      "fs.IsFormattedRequestProto\032%.hadoop.hdfs" +
      ".IsFormattedResponseProto\022f\n\017getJournalS" +
      "tate\022(.hadoop.hdfs.GetJournalStateReques" +
      "tProto\032).hadoop.hdfs.GetJournalStateResp" +
      "onseProto\022Q\n\010newEpoch\022!.hadoop.hdfs.NewE" +
      "pochRequestProto\032\".hadoop.hdfs.NewEpochR" +
      "esponseProto\022K\n\006format\022\037.hadoop.hdfs.For" +
      "matRequestProto\032 .hadoop.hdfs.FormatResp" +
      "onseProto\022N\n\007journal\022 .hadoop.hdfs.Journ" +
      "alRequestProto\032!.hadoop.hdfs.JournalResp",
      "onseProto\022T\n\theartbeat\022\".hadoop.hdfs.Hea" +
      "rtbeatRequestProto\032#.hadoop.hdfs.Heartbe" +
      "atResponseProto\022f\n\017startLogSegment\022(.had" +
      "oop.hdfs.StartLogSegmentRequestProto\032).h" +
      "adoop.hdfs.StartLogSegmentResponseProto\022" +
      "o\n\022finalizeLogSegment\022+.hadoop.hdfs.Fina" +
      "lizeLogSegmentRequestProto\032,.hadoop.hdfs" +
      ".FinalizeLogSegmentResponseProto\022T\n\tpurg" +
      "eLogs\022\".hadoop.hdfs.PurgeLogsRequestProt" +
      "o\032#.hadoop.hdfs.PurgeLogsResponseProto\022o",
      "\n\022getEditLogManifest\022+.hadoop.hdfs.GetEd" +
      "itLogManifestRequestProto\032,.hadoop.hdfs." +
      "GetEditLogManifestResponseProto\022f\n\017prepa" +
      "reRecovery\022(.hadoop.hdfs.PrepareRecovery" +
      "RequestProto\032).hadoop.hdfs.PrepareRecove" +
      "ryResponseProto\022c\n\016acceptRecovery\022\'.hado" +
      "op.hdfs.AcceptRecoveryRequestProto\032(.had" +
      "oop.hdfs.AcceptRecoveryResponseProtoBH\n(" +
      "org.apache.hadoop.hdfs.qjournal.protocol" +
      "B\026QJournalProtocolProtos\210\001\001\240\001\001"
    };
    // Callback invoked once the FileDescriptor has been cross-linked.
    // It captures the root descriptor and resolves each message's
    // Descriptor and reflective FieldAccessorTable by positional index:
    // getMessageTypes().get(i) follows the declaration order of the
    // messages in QJournalProtocol.proto, and the field-name arrays must
    // match each message's fields in field-number order.
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          internal_static_hadoop_hdfs_JournalIdProto_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalIdProto_descriptor,
              new java.lang.String[] { "Identifier", });
          internal_static_hadoop_hdfs_RequestInfoProto_descriptor =
            getDescriptor().getMessageTypes().get(1);
          internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_RequestInfoProto_descriptor,
              new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
          internal_static_hadoop_hdfs_SegmentStateProto_descriptor =
            getDescriptor().getMessageTypes().get(2);
          internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_SegmentStateProto_descriptor,
              new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor =
            getDescriptor().getMessageTypes().get(3);
          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
          internal_static_hadoop_hdfs_JournalRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(4);
          internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
          internal_static_hadoop_hdfs_JournalResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(5);
          internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(6);
          internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", });
          internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(7);
          internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(8);
          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "Txid", });
          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(9);
          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(10);
          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(11);
          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(12);
          internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
          internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(13);
          internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(14);
          internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor,
              new java.lang.String[] { "Jid", });
          internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(15);
          internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor,
              new java.lang.String[] { "IsFormatted", });
          internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(16);
          internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor,
              new java.lang.String[] { "Jid", });
          internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(17);
          internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor,
              new java.lang.String[] { "LastPromisedEpoch", "HttpPort", });
          internal_static_hadoop_hdfs_FormatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(18);
          internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FormatRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", });
          internal_static_hadoop_hdfs_FormatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(19);
          internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FormatResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(20);
          internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
          internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(21);
          internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor,
              new java.lang.String[] { "LastSegmentTxId", });
          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(22);
          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor,
              new java.lang.String[] { "Jid", "SinceTxId", "InProgressOk", });
          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(23);
          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor,
              new java.lang.String[] { "Manifest", "HttpPort", });
          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(24);
          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "SegmentTxId", });
          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(25);
          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(26);
          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(27);
          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor,
              new java.lang.String[] { });
          // This .proto declares no extensions, so no registry is returned.
          return null;
        }
      };
    // Build and cross-link the descriptor now. hdfs.proto (HdfsProtos) is
    // the single imported dependency and must be loadable here; its own
    // static initialization runs first via getDescriptor().
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
        }, assigner);
  }
18560    
18561      // @@protoc_insertion_point(outer_class_scope)
18562    }